// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// REQUIRES: amdgpu-registered-target

// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-unknown-unknown -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm-bc %s -o %t-x86-host.bc
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple amdgcn-amd-amdhsa -fopenmp-targets=amdgcn-amd-amdhsa -emit-llvm %s -fopenmp-is-device -fopenmp-host-ir-file-path %t-x86-host.bc -o - | FileCheck %s --check-prefix=IR-GPU

// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-llvm %s -o - | FileCheck %s --check-prefix=IR

// Check that the same results are produced after a serialization round-trip.
// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fopenmp -emit-pch -o %t %s
// RUN: %clang_cc1 -verify -triple x86_64-pc-linux-gnu -fopenmp -include-pch %t -emit-llvm %s -o - | FileCheck %s --check-prefix=IR-PCH

// expected-no-diagnostics

#ifndef HEADER
#define HEADER

typedef void **omp_allocator_handle_t;
extern const omp_allocator_handle_t omp_null_allocator;
extern const omp_allocator_handle_t omp_default_mem_alloc;
extern const omp_allocator_handle_t omp_large_cap_mem_alloc;
extern const omp_allocator_handle_t omp_const_mem_alloc;
extern const omp_allocator_handle_t omp_high_bw_mem_alloc;
extern const omp_allocator_handle_t omp_low_lat_mem_alloc;
extern const omp_allocator_handle_t omp_cgroup_mem_alloc;
extern const omp_allocator_handle_t omp_pteam_mem_alloc;
extern const omp_allocator_handle_t omp_thread_mem_alloc;

extern int omp_get_thread_num(void);

#define N 64

int main() {
  int x = 0;
  int device_result[N] = {0};

#pragma omp target parallel loop num_threads(N) uses_allocators(omp_pteam_mem_alloc) allocate(omp_pteam_mem_alloc: x) private(x) map(from: device_result)
  for (int i = 0; i < N; i++) {
    x = omp_get_thread_num();
    device_result[i] = i + x;
  }
}
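// On the device side, the privatized x allocated via
// allocate(omp_pteam_mem_alloc: x) is expected to live in an addrspace(3)
// (LDS) global, while the host lowering allocates and frees it with
// __kmpc_alloc/__kmpc_free using the omp_pteam_mem_alloc handle; both
// lowerings are checked below.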
#endif
// IR-GPU-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37
// IR-GPU-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[DEVICE_RESULT:%.*]], ptr noundef [[OMP_PTEAM_MEM_ALLOC:%.*]]) #[[ATTR0:[0-9]+]] {
// IR-GPU-NEXT: entry:
// IR-GPU-NEXT: [[DEVICE_RESULT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// IR-GPU-NEXT: [[OMP_PTEAM_MEM_ALLOC_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// IR-GPU-NEXT: [[CAPTURED_VARS_ADDRS:%.*]] = alloca [2 x ptr], align 8, addrspace(5)
// IR-GPU-NEXT: [[DEVICE_RESULT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DEVICE_RESULT_ADDR]] to ptr
// IR-GPU-NEXT: [[OMP_PTEAM_MEM_ALLOC_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OMP_PTEAM_MEM_ALLOC_ADDR]] to ptr
// IR-GPU-NEXT: [[CAPTURED_VARS_ADDRS_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[CAPTURED_VARS_ADDRS]] to ptr
// IR-GPU-NEXT: store ptr [[DEVICE_RESULT]], ptr [[DEVICE_RESULT_ADDR_ASCAST]], align 8
// IR-GPU-NEXT: store ptr [[OMP_PTEAM_MEM_ALLOC]], ptr [[OMP_PTEAM_MEM_ALLOC_ADDR_ASCAST]], align 8
// IR-GPU-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR_ASCAST]], align 8
// IR-GPU-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_target_init(ptr addrspacecast (ptr addrspace(1) @[[GLOB1:[0-9]+]] to ptr), i8 2, i1 false)
// IR-GPU-NEXT: [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP1]], -1
// IR-GPU-NEXT: br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[WORKER_EXIT:%.*]]
// IR-GPU: user_code.entry:
// IR-GPU-NEXT: [[TMP2:%.*]] = call i32 @__kmpc_global_thread_num(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr))
// IR-GPU-NEXT: [[TMP3:%.*]] = load ptr, ptr [[OMP_PTEAM_MEM_ALLOC_ADDR_ASCAST]], align 8
// IR-GPU-NEXT: [[TMP4:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 0
// IR-GPU-NEXT: store ptr [[TMP0]], ptr [[TMP4]], align 8
// IR-GPU-NEXT: [[TMP5:%.*]] = getelementptr inbounds [2 x ptr], ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 0, i64 1
// IR-GPU-NEXT: store ptr [[TMP3]], ptr [[TMP5]], align 8
// IR-GPU-NEXT: call void @__kmpc_parallel_51(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i32 [[TMP2]], i32 1, i32 64, i32 -1, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37_omp_outlined, ptr null, ptr [[CAPTURED_VARS_ADDRS_ASCAST]], i64 2)
// IR-GPU-NEXT: call void @__kmpc_target_deinit(ptr addrspacecast (ptr addrspace(1) @[[GLOB1]] to ptr), i8 2)
// IR-GPU-NEXT: ret void
// IR-GPU: worker.exit:
// IR-GPU-NEXT: ret void
//
//
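// The kernel above forwards num_threads(64) from the num_threads(N) clause as
// the thread-count argument of __kmpc_parallel_51 and passes device_result
// plus the allocator handle to the outlined parallel body that follows.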
// IR-GPU-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37_omp_outlined
// IR-GPU-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(256) [[DEVICE_RESULT:%.*]], ptr noundef [[OMP_PTEAM_MEM_ALLOC:%.*]]) #[[ATTR1:[0-9]+]] {
// IR-GPU-NEXT: entry:
// IR-GPU-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// IR-GPU-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// IR-GPU-NEXT: [[DEVICE_RESULT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// IR-GPU-NEXT: [[OMP_PTEAM_MEM_ALLOC_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// IR-GPU-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4, addrspace(5)
// IR-GPU-NEXT: [[TMP:%.*]] = alloca i32, align 4, addrspace(5)
// IR-GPU-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4, addrspace(5)
// IR-GPU-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4, addrspace(5)
// IR-GPU-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4, addrspace(5)
// IR-GPU-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4, addrspace(5)
// IR-GPU-NEXT: [[I:%.*]] = alloca i32, align 4, addrspace(5)
// IR-GPU-NEXT: [[DOTGLOBAL_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTGLOBAL_TID__ADDR]] to ptr
// IR-GPU-NEXT: [[DOTBOUND_TID__ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTBOUND_TID__ADDR]] to ptr
// IR-GPU-NEXT: [[DEVICE_RESULT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DEVICE_RESULT_ADDR]] to ptr
// IR-GPU-NEXT: [[OMP_PTEAM_MEM_ALLOC_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OMP_PTEAM_MEM_ALLOC_ADDR]] to ptr
// IR-GPU-NEXT: [[DOTOMP_IV_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_IV]] to ptr
// IR-GPU-NEXT: [[TMP_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[TMP]] to ptr
// IR-GPU-NEXT: [[DOTOMP_LB_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_LB]] to ptr
// IR-GPU-NEXT: [[DOTOMP_UB_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_UB]] to ptr
// IR-GPU-NEXT: [[DOTOMP_STRIDE_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_STRIDE]] to ptr
// IR-GPU-NEXT: [[DOTOMP_IS_LAST_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DOTOMP_IS_LAST]] to ptr
// IR-GPU-NEXT: [[I_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[I]] to ptr
// IR-GPU-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// IR-GPU-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR_ASCAST]], align 8
// IR-GPU-NEXT: store ptr [[DEVICE_RESULT]], ptr [[DEVICE_RESULT_ADDR_ASCAST]], align 8
// IR-GPU-NEXT: store ptr [[OMP_PTEAM_MEM_ALLOC]], ptr [[OMP_PTEAM_MEM_ALLOC_ADDR_ASCAST]], align 8
// IR-GPU-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR_ASCAST]], align 8
// IR-GPU-NEXT: store i32 0, ptr [[DOTOMP_LB_ASCAST]], align 4
// IR-GPU-NEXT: store i32 63, ptr [[DOTOMP_UB_ASCAST]], align 4
// IR-GPU-NEXT: store i32 1, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
// IR-GPU-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST_ASCAST]], align 4
// IR-GPU-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR_ASCAST]], align 8
// IR-GPU-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// IR-GPU-NEXT: call void @__kmpc_for_static_init_4(ptr addrspacecast (ptr addrspace(1) @[[GLOB2:[0-9]+]] to ptr), i32 [[TMP2]], i32 33, ptr [[DOTOMP_IS_LAST_ASCAST]], ptr [[DOTOMP_LB_ASCAST]], ptr [[DOTOMP_UB_ASCAST]], ptr [[DOTOMP_STRIDE_ASCAST]], i32 1, i32 1)
// IR-GPU-NEXT: br label [[OMP_DISPATCH_COND:%.*]]
// IR-GPU: omp.dispatch.cond:
// IR-GPU-NEXT: [[TMP3:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
// IR-GPU-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP3]], 63
// IR-GPU-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// IR-GPU: cond.true:
// IR-GPU-NEXT: br label [[COND_END:%.*]]
// IR-GPU: cond.false:
// IR-GPU-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
// IR-GPU-NEXT: br label [[COND_END]]
// IR-GPU: cond.end:
// IR-GPU-NEXT: [[COND:%.*]] = phi i32 [ 63, [[COND_TRUE]] ], [ [[TMP4]], [[COND_FALSE]] ]
// IR-GPU-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB_ASCAST]], align 4
// IR-GPU-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_LB_ASCAST]], align 4
// IR-GPU-NEXT: store i32 [[TMP5]], ptr [[DOTOMP_IV_ASCAST]], align 4
// IR-GPU-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// IR-GPU-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
// IR-GPU-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP6]], [[TMP7]]
// IR-GPU-NEXT: br i1 [[CMP1]], label [[OMP_DISPATCH_BODY:%.*]], label [[OMP_DISPATCH_END:%.*]]
// IR-GPU: omp.dispatch.body:
// IR-GPU-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// IR-GPU: omp.inner.for.cond:
// IR-GPU-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// IR-GPU-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
// IR-GPU-NEXT: [[CMP2:%.*]] = icmp sle i32 [[TMP8]], [[TMP9]]
// IR-GPU-NEXT: br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// IR-GPU: omp.inner.for.body:
// IR-GPU-NEXT: [[TMP10:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// IR-GPU-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP10]], 1
// IR-GPU-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// IR-GPU-NEXT: store i32 [[ADD]], ptr [[I_ASCAST]], align 4
// IR-GPU-NEXT: [[CALL:%.*]] = call noundef i32 @_Z18omp_get_thread_numv() #[[ATTR5:[0-9]+]]
// IR-GPU-NEXT: store i32 [[CALL]], ptr addrspacecast (ptr addrspace(3) @x to ptr), align 4
// IR-GPU-NEXT: [[TMP11:%.*]] = load i32, ptr [[I_ASCAST]], align 4
// IR-GPU-NEXT: [[TMP12:%.*]] = load i32, ptr addrspacecast (ptr addrspace(3) @x to ptr), align 4
// IR-GPU-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP11]], [[TMP12]]
// IR-GPU-NEXT: [[TMP13:%.*]] = load i32, ptr [[I_ASCAST]], align 4
// IR-GPU-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP13]] to i64
// IR-GPU-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [64 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
// IR-GPU-NEXT: store i32 [[ADD3]], ptr [[ARRAYIDX]], align 4
// IR-GPU-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// IR-GPU: omp.body.continue:
// IR-GPU-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// IR-GPU: omp.inner.for.inc:
// IR-GPU-NEXT: [[TMP14:%.*]] = load i32, ptr [[DOTOMP_IV_ASCAST]], align 4
// IR-GPU-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], 1
// IR-GPU-NEXT: store i32 [[ADD4]], ptr [[DOTOMP_IV_ASCAST]], align 4
// IR-GPU-NEXT: br label [[OMP_INNER_FOR_COND]]
// IR-GPU: omp.inner.for.end:
// IR-GPU-NEXT: br label [[OMP_DISPATCH_INC:%.*]]
// IR-GPU: omp.dispatch.inc:
// IR-GPU-NEXT: [[TMP15:%.*]] = load i32, ptr [[DOTOMP_LB_ASCAST]], align 4
// IR-GPU-NEXT: [[TMP16:%.*]] = load i32, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
// IR-GPU-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP15]], [[TMP16]]
// IR-GPU-NEXT: store i32 [[ADD5]], ptr [[DOTOMP_LB_ASCAST]], align 4
// IR-GPU-NEXT: [[TMP17:%.*]] = load i32, ptr [[DOTOMP_UB_ASCAST]], align 4
// IR-GPU-NEXT: [[TMP18:%.*]] = load i32, ptr [[DOTOMP_STRIDE_ASCAST]], align 4
// IR-GPU-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP17]], [[TMP18]]
// IR-GPU-NEXT: store i32 [[ADD6]], ptr [[DOTOMP_UB_ASCAST]], align 4
// IR-GPU-NEXT: br label [[OMP_DISPATCH_COND]]
// IR-GPU: omp.dispatch.end:
// IR-GPU-NEXT: call void @__kmpc_for_static_fini(ptr addrspacecast (ptr addrspace(1) @[[GLOB2]] to ptr), i32 [[TMP2]])
// IR-GPU-NEXT: ret void
//
//
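// The IR and IR-PCH RUN lines compile without -fopenmp-targets, so the host
// IR below calls the target-region function directly instead of going through
// an offload launch.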
// IR-LABEL: define {{[^@]+}}@main
// IR-SAME: () #[[ATTR0:[0-9]+]] {
// IR-NEXT: entry:
// IR-NEXT: [[X:%.*]] = alloca i32, align 4
// IR-NEXT: [[DEVICE_RESULT:%.*]] = alloca [64 x i32], align 16
// IR-NEXT: store i32 0, ptr [[X]], align 4
// IR-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[DEVICE_RESULT]], i8 0, i64 256, i1 false)
// IR-NEXT: [[TMP0:%.*]] = load ptr, ptr @omp_pteam_mem_alloc, align 8
// IR-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37(ptr [[DEVICE_RESULT]], ptr [[TMP0]]) #[[ATTR3:[0-9]+]]
// IR-NEXT: ret i32 0
//
//
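// On the host, num_threads(64) lowers to __kmpc_push_num_threads ahead of the
// __kmpc_fork_call that invokes the outlined parallel region.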
// IR-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37
// IR-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[DEVICE_RESULT:%.*]], ptr noundef [[OMP_PTEAM_MEM_ALLOC:%.*]]) #[[ATTR2:[0-9]+]] {
// IR-NEXT: entry:
// IR-NEXT: [[DEVICE_RESULT_ADDR:%.*]] = alloca ptr, align 8
// IR-NEXT: [[OMP_PTEAM_MEM_ALLOC_ADDR:%.*]] = alloca ptr, align 8
// IR-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2:[0-9]+]])
// IR-NEXT: store ptr [[DEVICE_RESULT]], ptr [[DEVICE_RESULT_ADDR]], align 8
// IR-NEXT: store ptr [[OMP_PTEAM_MEM_ALLOC]], ptr [[OMP_PTEAM_MEM_ALLOC_ADDR]], align 8
// IR-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR]], align 8
// IR-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB2]], i32 [[TMP0]], i32 64)
// IR-NEXT: [[TMP2:%.*]] = load ptr, ptr [[OMP_PTEAM_MEM_ALLOC_ADDR]], align 8
// IR-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37.omp_outlined, ptr [[TMP1]], ptr [[TMP2]])
// IR-NEXT: ret void
//
//
// IR-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37.omp_outlined
// IR-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(256) [[DEVICE_RESULT:%.*]], ptr noundef [[OMP_PTEAM_MEM_ALLOC:%.*]]) #[[ATTR2]] {
// IR-NEXT: entry:
// IR-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// IR-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// IR-NEXT: [[DEVICE_RESULT_ADDR:%.*]] = alloca ptr, align 8
// IR-NEXT: [[OMP_PTEAM_MEM_ALLOC_ADDR:%.*]] = alloca ptr, align 8
// IR-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// IR-NEXT: [[TMP:%.*]] = alloca i32, align 4
// IR-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// IR-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// IR-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// IR-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// IR-NEXT: [[I:%.*]] = alloca i32, align 4
// IR-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// IR-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// IR-NEXT: store ptr [[DEVICE_RESULT]], ptr [[DEVICE_RESULT_ADDR]], align 8
// IR-NEXT: store ptr [[OMP_PTEAM_MEM_ALLOC]], ptr [[OMP_PTEAM_MEM_ALLOC_ADDR]], align 8
// IR-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR]], align 8
// IR-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// IR-NEXT: store i32 63, ptr [[DOTOMP_UB]], align 4
// IR-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// IR-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// IR-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// IR-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// IR-NEXT: [[TMP3:%.*]] = load ptr, ptr @omp_pteam_mem_alloc, align 8
// IR-NEXT: [[DOTX__VOID_ADDR:%.*]] = call ptr @__kmpc_alloc(i32 [[TMP2]], i64 4, ptr [[TMP3]])
// IR-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// IR-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// IR-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 63
// IR-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// IR: cond.true:
// IR-NEXT: br label [[COND_END:%.*]]
// IR: cond.false:
// IR-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// IR-NEXT: br label [[COND_END]]
// IR: cond.end:
// IR-NEXT: [[COND:%.*]] = phi i32 [ 63, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// IR-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// IR-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// IR-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
// IR-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// IR: omp.inner.for.cond:
// IR-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// IR-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// IR-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// IR-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
// IR: omp.inner.for.cond.cleanup:
// IR-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
// IR: omp.inner.for.body:
// IR-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// IR-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
// IR-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// IR-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// IR-NEXT: [[CALL:%.*]] = call noundef i32 @_Z18omp_get_thread_numv()
// IR-NEXT: store i32 [[CALL]], ptr [[DOTX__VOID_ADDR]], align 4
// IR-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
// IR-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTX__VOID_ADDR]], align 4
// IR-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// IR-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4
// IR-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
// IR-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [64 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
// IR-NEXT: store i32 [[ADD2]], ptr [[ARRAYIDX]], align 4
// IR-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// IR: omp.body.continue:
// IR-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// IR: omp.inner.for.inc:
// IR-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// IR-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP13]], 1
// IR-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// IR-NEXT: br label [[OMP_INNER_FOR_COND]]
// IR: omp.inner.for.end:
// IR-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// IR: omp.loop.exit:
// IR-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
// IR-NEXT: [[TMP14:%.*]] = load ptr, ptr @omp_pteam_mem_alloc, align 8
// IR-NEXT: call void @__kmpc_free(i32 [[TMP2]], ptr [[DOTX__VOID_ADDR]], ptr [[TMP14]])
// IR-NEXT: ret void
//
//
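// The IR-PCH checks below differ from the IR checks only in their prefix;
// identical bodies confirm that codegen is unchanged after the PCH
// round-trip.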
// IR-PCH-LABEL: define {{[^@]+}}@main
// IR-PCH-SAME: () #[[ATTR0:[0-9]+]] {
// IR-PCH-NEXT: entry:
// IR-PCH-NEXT: [[X:%.*]] = alloca i32, align 4
// IR-PCH-NEXT: [[DEVICE_RESULT:%.*]] = alloca [64 x i32], align 16
// IR-PCH-NEXT: store i32 0, ptr [[X]], align 4
// IR-PCH-NEXT: call void @llvm.memset.p0.i64(ptr align 16 [[DEVICE_RESULT]], i8 0, i64 256, i1 false)
// IR-PCH-NEXT: [[TMP0:%.*]] = load ptr, ptr @omp_pteam_mem_alloc, align 8
// IR-PCH-NEXT: call void @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37(ptr [[DEVICE_RESULT]], ptr [[TMP0]]) #[[ATTR3:[0-9]+]]
// IR-PCH-NEXT: ret i32 0
//
//
// IR-PCH-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37
// IR-PCH-SAME: (ptr noundef nonnull align 4 dereferenceable(256) [[DEVICE_RESULT:%.*]], ptr noundef [[OMP_PTEAM_MEM_ALLOC:%.*]]) #[[ATTR2:[0-9]+]] {
// IR-PCH-NEXT: entry:
// IR-PCH-NEXT: [[DEVICE_RESULT_ADDR:%.*]] = alloca ptr, align 8
// IR-PCH-NEXT: [[OMP_PTEAM_MEM_ALLOC_ADDR:%.*]] = alloca ptr, align 8
// IR-PCH-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB2:[0-9]+]])
// IR-PCH-NEXT: store ptr [[DEVICE_RESULT]], ptr [[DEVICE_RESULT_ADDR]], align 8
// IR-PCH-NEXT: store ptr [[OMP_PTEAM_MEM_ALLOC]], ptr [[OMP_PTEAM_MEM_ALLOC_ADDR]], align 8
// IR-PCH-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR]], align 8
// IR-PCH-NEXT: call void @__kmpc_push_num_threads(ptr @[[GLOB2]], i32 [[TMP0]], i32 64)
// IR-PCH-NEXT: [[TMP2:%.*]] = load ptr, ptr [[OMP_PTEAM_MEM_ALLOC_ADDR]], align 8
// IR-PCH-NEXT: call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr @[[GLOB2]], i32 2, ptr @{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37.omp_outlined, ptr [[TMP1]], ptr [[TMP2]])
// IR-PCH-NEXT: ret void
//
//
// IR-PCH-LABEL: define {{[^@]+}}@{{__omp_offloading_[0-9a-z]+_[0-9a-z]+}}_main_l37.omp_outlined
// IR-PCH-SAME: (ptr noalias noundef [[DOTGLOBAL_TID_:%.*]], ptr noalias noundef [[DOTBOUND_TID_:%.*]], ptr noundef nonnull align 4 dereferenceable(256) [[DEVICE_RESULT:%.*]], ptr noundef [[OMP_PTEAM_MEM_ALLOC:%.*]]) #[[ATTR2]] {
// IR-PCH-NEXT: entry:
// IR-PCH-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca ptr, align 8
// IR-PCH-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca ptr, align 8
// IR-PCH-NEXT: [[DEVICE_RESULT_ADDR:%.*]] = alloca ptr, align 8
// IR-PCH-NEXT: [[OMP_PTEAM_MEM_ALLOC_ADDR:%.*]] = alloca ptr, align 8
// IR-PCH-NEXT: [[DOTOMP_IV:%.*]] = alloca i32, align 4
// IR-PCH-NEXT: [[TMP:%.*]] = alloca i32, align 4
// IR-PCH-NEXT: [[DOTOMP_LB:%.*]] = alloca i32, align 4
// IR-PCH-NEXT: [[DOTOMP_UB:%.*]] = alloca i32, align 4
// IR-PCH-NEXT: [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// IR-PCH-NEXT: [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// IR-PCH-NEXT: [[I:%.*]] = alloca i32, align 4
// IR-PCH-NEXT: store ptr [[DOTGLOBAL_TID_]], ptr [[DOTGLOBAL_TID__ADDR]], align 8
// IR-PCH-NEXT: store ptr [[DOTBOUND_TID_]], ptr [[DOTBOUND_TID__ADDR]], align 8
// IR-PCH-NEXT: store ptr [[DEVICE_RESULT]], ptr [[DEVICE_RESULT_ADDR]], align 8
// IR-PCH-NEXT: store ptr [[OMP_PTEAM_MEM_ALLOC]], ptr [[OMP_PTEAM_MEM_ALLOC_ADDR]], align 8
// IR-PCH-NEXT: [[TMP0:%.*]] = load ptr, ptr [[DEVICE_RESULT_ADDR]], align 8
// IR-PCH-NEXT: store i32 0, ptr [[DOTOMP_LB]], align 4
// IR-PCH-NEXT: store i32 63, ptr [[DOTOMP_UB]], align 4
// IR-PCH-NEXT: store i32 1, ptr [[DOTOMP_STRIDE]], align 4
// IR-PCH-NEXT: store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
// IR-PCH-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTGLOBAL_TID__ADDR]], align 8
// IR-PCH-NEXT: [[TMP2:%.*]] = load i32, ptr [[TMP1]], align 4
// IR-PCH-NEXT: [[TMP3:%.*]] = load ptr, ptr @omp_pteam_mem_alloc, align 8
// IR-PCH-NEXT: [[DOTX__VOID_ADDR:%.*]] = call ptr @__kmpc_alloc(i32 [[TMP2]], i64 4, ptr [[TMP3]])
// IR-PCH-NEXT: call void @__kmpc_for_static_init_4(ptr @[[GLOB1:[0-9]+]], i32 [[TMP2]], i32 34, ptr [[DOTOMP_IS_LAST]], ptr [[DOTOMP_LB]], ptr [[DOTOMP_UB]], ptr [[DOTOMP_STRIDE]], i32 1, i32 1)
// IR-PCH-NEXT: [[TMP4:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// IR-PCH-NEXT: [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 63
// IR-PCH-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// IR-PCH: cond.true:
// IR-PCH-NEXT: br label [[COND_END:%.*]]
// IR-PCH: cond.false:
// IR-PCH-NEXT: [[TMP5:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// IR-PCH-NEXT: br label [[COND_END]]
// IR-PCH: cond.end:
// IR-PCH-NEXT: [[COND:%.*]] = phi i32 [ 63, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// IR-PCH-NEXT: store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
// IR-PCH-NEXT: [[TMP6:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
// IR-PCH-NEXT: store i32 [[TMP6]], ptr [[DOTOMP_IV]], align 4
// IR-PCH-NEXT: br label [[OMP_INNER_FOR_COND:%.*]]
// IR-PCH: omp.inner.for.cond:
// IR-PCH-NEXT: [[TMP7:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// IR-PCH-NEXT: [[TMP8:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
// IR-PCH-NEXT: [[CMP1:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// IR-PCH-NEXT: br i1 [[CMP1]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]]
// IR-PCH: omp.inner.for.cond.cleanup:
// IR-PCH-NEXT: br label [[OMP_INNER_FOR_END:%.*]]
// IR-PCH: omp.inner.for.body:
// IR-PCH-NEXT: [[TMP9:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// IR-PCH-NEXT: [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
// IR-PCH-NEXT: [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// IR-PCH-NEXT: store i32 [[ADD]], ptr [[I]], align 4
// IR-PCH-NEXT: [[CALL:%.*]] = call noundef i32 @_Z18omp_get_thread_numv()
// IR-PCH-NEXT: store i32 [[CALL]], ptr [[DOTX__VOID_ADDR]], align 4
// IR-PCH-NEXT: [[TMP10:%.*]] = load i32, ptr [[I]], align 4
// IR-PCH-NEXT: [[TMP11:%.*]] = load i32, ptr [[DOTX__VOID_ADDR]], align 4
// IR-PCH-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], [[TMP11]]
// IR-PCH-NEXT: [[TMP12:%.*]] = load i32, ptr [[I]], align 4
// IR-PCH-NEXT: [[IDXPROM:%.*]] = sext i32 [[TMP12]] to i64
// IR-PCH-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [64 x i32], ptr [[TMP0]], i64 0, i64 [[IDXPROM]]
// IR-PCH-NEXT: store i32 [[ADD2]], ptr [[ARRAYIDX]], align 4
// IR-PCH-NEXT: br label [[OMP_BODY_CONTINUE:%.*]]
// IR-PCH: omp.body.continue:
// IR-PCH-NEXT: br label [[OMP_INNER_FOR_INC:%.*]]
// IR-PCH: omp.inner.for.inc:
// IR-PCH-NEXT: [[TMP13:%.*]] = load i32, ptr [[DOTOMP_IV]], align 4
// IR-PCH-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP13]], 1
// IR-PCH-NEXT: store i32 [[ADD3]], ptr [[DOTOMP_IV]], align 4
// IR-PCH-NEXT: br label [[OMP_INNER_FOR_COND]]
// IR-PCH: omp.inner.for.end:
// IR-PCH-NEXT: br label [[OMP_LOOP_EXIT:%.*]]
// IR-PCH: omp.loop.exit:
// IR-PCH-NEXT: call void @__kmpc_for_static_fini(ptr @[[GLOB1]], i32 [[TMP2]])
// IR-PCH-NEXT: [[TMP14:%.*]] = load ptr, ptr @omp_pteam_mem_alloc, align 8
// IR-PCH-NEXT: call void @__kmpc_free(i32 [[TMP2]], ptr [[DOTX__VOID_ADDR]], ptr [[TMP14]])
// IR-PCH-NEXT: ret void
//