// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK2
// RUN: %clang_cc1 -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK4

// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -fopenmp-simd -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -verify -fopenmp-simd -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

struct St {
  int a, b;
  St() : a(0), b(0) {}
  St(const St &st) : a(st.a + st.b), b(0) {}
  ~St() {}
};

volatile int g = 1212;

template <class T>
struct S {
  T f;
  S(T a) : f(a + g) {}
  S() : f(g) {}
  S(const S &s, St t = St()) : f(s.f + t.a) {}
  operator T() { return T(); }
  ~S() {}
};


template <typename T>
T tmain() {
  S<T> test;
  T t_var = T();
  T vec[] = {1, 2};
  S<T> s_arr[] = {1, 2};
  S<T> var(3);
#pragma omp parallel
#pragma omp sections firstprivate(t_var, vec, s_arr, var)
  {
    vec[0] = t_var;
#pragma omp section
    s_arr[0] = var;
  }
  return T();
}

S<float> test;
int t_var = 333;
int vec[] = {1, 2};
S<float> s_arr[] = {1, 2};
S<float> var(3);

int main() {
  static int sivar;
#ifdef LAMBDA
  [&]() {
#pragma omp parallel
#pragma omp sections firstprivate(g, sivar)
    {
      // Skip temp vars for loop


      {
        g = 1;
        sivar = 10;
      }
#pragma omp section
      [&]() {
        g = 2;
        sivar = 20;
      }();
    }
  }();
  return 0;
#elif defined(BLOCKS)
  ^{
#pragma omp parallel
#pragma omp sections firstprivate(g, sivar)
    {
      // Skip temp vars for loop


      {
        g = 1;
        sivar = 10;
      }
#pragma omp section
      ^{
        g = 2;
        sivar = 20;
      }();
    }
  }();
  return 0;
#else
#pragma omp sections firstprivate(t_var, vec, s_arr, var, sivar) nowait
  {
    {
      vec[0] = t_var;
      s_arr[0] = var;
      sivar = 31;
    }
  }
  return tmain<int>();
#endif
}


// firstprivate t_var(t_var)

// firstprivate vec(vec)

// firstprivate s_arr(s_arr)

// firstprivate var(var)

// firstprivate sivar

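// A hedged, illustrative sketch (kept entirely in comments so it does not feed the
// test or change the checked IR): the main() CHECK lines below verify a copy-in
// sequence that corresponds roughly to the hand-written C++ here -- a scalar
// load/store for t_var and sivar, an llvm.memcpy for vec, and per-element
// copy-construction through S(const S &, St) with a temporary St for s_arr and
// var, followed by the matching destructor calls and, because of 'nowait', no
// trailing __kmpc_barrier in main. The function and *_priv names are hypothetical
// and exist only for illustration.
//
//   static void firstprivate_copy_in_sketch(int sivar) { // stands in for main's local static
//     int t_var_priv = t_var;                        // load @t_var, store to the private copy
//     int vec_priv[2];
//     __builtin_memcpy(vec_priv, vec, sizeof(vec));  // array copied with llvm.memcpy
//     S<float> s_arr_priv[2] = {{s_arr[0], St()},    // each element copy-constructed via
//                               {s_arr[1], St()}};   // S(const S &, St); the St temp is destroyed
//     S<float> var_priv{var, St()};                  // same copy constructor for var
//     int sivar_priv = sivar;                        // load of @_ZZ4mainE5sivar
//     // ...the sections body operates on the private copies; destructors for
//     // var_priv and s_arr_priv run afterwards, with no barrier due to 'nowait'.
//   }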
// ~(firstprivate var), ~(firstprivate s_arr) 130 131 132 133 // Skip temp vars for loop 134 135 136 // firstprivate t_var(t_var) 137 138 // firstprivate vec(vec) 139 140 // firstprivate s_arr(s_arr) 141 142 // firstprivate var(var) 143 144 // No synchronization for initialization. 145 146 147 // ~(firstprivate var), ~(firstprivate s_arr) 148 #endif 149 150 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_var_init 151 // CHECK1-SAME: () #[[ATTR0:[0-9]+]] section "__TEXT,__StaticInit,regular,pure_instructions" { 152 // CHECK1-NEXT: entry: 153 // CHECK1-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef @test) 154 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]] 155 // CHECK1-NEXT: ret void 156 // 157 // 158 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev 159 // CHECK1-SAME: (%struct.S* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 { 160 // CHECK1-NEXT: entry: 161 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 162 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 163 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 164 // CHECK1-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* noundef [[THIS1]]) 165 // CHECK1-NEXT: ret void 166 // 167 // 168 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev 169 // CHECK1-SAME: (%struct.S* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 170 // CHECK1-NEXT: entry: 171 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 172 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 173 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 174 // CHECK1-NEXT: call void @_ZN1SIfED2Ev(%struct.S* noundef [[THIS1]]) #[[ATTR2]] 175 // CHECK1-NEXT: ret void 176 // 177 // 178 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev 179 // CHECK1-SAME: (%struct.S* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 180 // CHECK1-NEXT: entry: 181 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 182 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 183 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 184 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 185 // CHECK1-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4 186 // CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float 187 // CHECK1-NEXT: store float [[CONV]], float* [[F]], align 4 188 // CHECK1-NEXT: ret void 189 // 190 // 191 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev 192 // CHECK1-SAME: (%struct.S* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 193 // CHECK1-NEXT: entry: 194 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 195 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 196 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 197 // CHECK1-NEXT: ret void 198 // 199 // 200 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_var_init.1 201 // CHECK1-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 202 // CHECK1-NEXT: entry: 203 // CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float noundef 1.000000e+00) 204 // CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef getelementptr 
inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float noundef 2.000000e+00) 205 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]] 206 // CHECK1-NEXT: ret void 207 // 208 // 209 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef 210 // CHECK1-SAME: (%struct.S* noundef [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 211 // CHECK1-NEXT: entry: 212 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 213 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float, align 4 214 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 215 // CHECK1-NEXT: store float [[A]], float* [[A_ADDR]], align 4 216 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 217 // CHECK1-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4 218 // CHECK1-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* noundef [[THIS1]], float noundef [[TMP0]]) 219 // CHECK1-NEXT: ret void 220 // 221 // 222 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_array_dtor 223 // CHECK1-SAME: (i8* noundef [[TMP0:%.*]]) #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 224 // CHECK1-NEXT: entry: 225 // CHECK1-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 226 // CHECK1-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 227 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 228 // CHECK1: arraydestroy.body: 229 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 230 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 231 // CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 232 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0) 233 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]] 234 // CHECK1: arraydestroy.done1: 235 // CHECK1-NEXT: ret void 236 // 237 // 238 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef 239 // CHECK1-SAME: (%struct.S* noundef [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 240 // CHECK1-NEXT: entry: 241 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 242 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca float, align 4 243 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 244 // CHECK1-NEXT: store float [[A]], float* [[A_ADDR]], align 4 245 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 246 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 247 // CHECK1-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4 248 // CHECK1-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4 249 // CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to float 250 // CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]] 251 // CHECK1-NEXT: store float [[ADD]], float* [[F]], align 4 252 // CHECK1-NEXT: ret void 253 // 254 // 255 // CHECK1-LABEL: define {{[^@]+}}@__cxx_global_var_init.2 256 // CHECK1-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 257 // CHECK1-NEXT: 
entry: 258 // CHECK1-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef @var, float noundef 3.000000e+00) 259 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]] 260 // CHECK1-NEXT: ret void 261 // 262 // 263 // CHECK1-LABEL: define {{[^@]+}}@main 264 // CHECK1-SAME: () #[[ATTR3:[0-9]+]] { 265 // CHECK1-NEXT: entry: 266 // CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 267 // CHECK1-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4 268 // CHECK1-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4 269 // CHECK1-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4 270 // CHECK1-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4 271 // CHECK1-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4 272 // CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 4 273 // CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4 274 // CHECK1-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4 275 // CHECK1-NEXT: [[AGG_TMP:%.*]] = alloca [[STRUCT_ST:%.*]], align 4 276 // CHECK1-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S:%.*]], align 4 277 // CHECK1-NEXT: [[AGG_TMP2:%.*]] = alloca [[STRUCT_ST]], align 4 278 // CHECK1-NEXT: [[SIVAR:%.*]] = alloca i32, align 4 279 // CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) 280 // CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4 281 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4 282 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_UB_]], align 4 283 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4 284 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4 285 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* @t_var, align 4 286 // CHECK1-NEXT: store i32 [[TMP1]], i32* [[T_VAR]], align 4 287 // CHECK1-NEXT: [[TMP2:%.*]] = bitcast [2 x i32]* [[VEC]] to i8* 288 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 bitcast ([2 x i32]* @vec to i8*), i64 8, i1 false) 289 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0 290 // CHECK1-NEXT: [[TMP3:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2 291 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN]], [[TMP3]] 292 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE1:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 293 // CHECK1: omp.arraycpy.body: 294 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 295 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 296 // CHECK1-NEXT: call void @_ZN2StC1Ev(%struct.St* noundef [[AGG_TMP]]) 297 // CHECK1-NEXT: call void @_ZN1SIfEC1ERKS0_2St(%struct.S* noundef [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S* noundef nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]], %struct.St* noundef [[AGG_TMP]]) 298 // CHECK1-NEXT: call void @_ZN2StD1Ev(%struct.St* noundef [[AGG_TMP]]) #[[ATTR2]] 299 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 300 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr 
[[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 301 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP3]] 302 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE1]], label [[OMP_ARRAYCPY_BODY]] 303 // CHECK1: omp.arraycpy.done1: 304 // CHECK1-NEXT: call void @_ZN2StC1Ev(%struct.St* noundef [[AGG_TMP2]]) 305 // CHECK1-NEXT: call void @_ZN1SIfEC1ERKS0_2St(%struct.S* noundef [[VAR]], %struct.S* noundef nonnull align 4 dereferenceable(4) @var, %struct.St* noundef [[AGG_TMP2]]) 306 // CHECK1-NEXT: call void @_ZN2StD1Ev(%struct.St* noundef [[AGG_TMP2]]) #[[ATTR2]] 307 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4 308 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[SIVAR]], align 4 309 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP0]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1) 310 // CHECK1-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4 311 // CHECK1-NEXT: [[TMP6:%.*]] = icmp slt i32 [[TMP5]], 0 312 // CHECK1-NEXT: [[TMP7:%.*]] = select i1 [[TMP6]], i32 [[TMP5]], i32 0 313 // CHECK1-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_SECTIONS_UB_]], align 4 314 // CHECK1-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4 315 // CHECK1-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_SECTIONS_IV_]], align 4 316 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 317 // CHECK1: omp.inner.for.cond: 318 // CHECK1-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4 319 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4 320 // CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] 321 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 322 // CHECK1: omp.inner.for.body: 323 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4 324 // CHECK1-NEXT: switch i32 [[TMP11]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [ 325 // CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]] 326 // CHECK1-NEXT: ] 327 // CHECK1: .omp.sections.case: 328 // CHECK1-NEXT: [[TMP12:%.*]] = load i32, i32* [[T_VAR]], align 4 329 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 0 330 // CHECK1-NEXT: store i32 [[TMP12]], i32* [[ARRAYIDX]], align 4 331 // CHECK1-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0 332 // CHECK1-NEXT: [[TMP13:%.*]] = bitcast %struct.S* [[ARRAYIDX3]] to i8* 333 // CHECK1-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[VAR]] to i8* 334 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP13]], i8* align 4 [[TMP14]], i64 4, i1 false) 335 // CHECK1-NEXT: store i32 31, i32* [[SIVAR]], align 4 336 // CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT]] 337 // CHECK1: .omp.sections.exit: 338 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 339 // CHECK1: omp.inner.for.inc: 340 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4 341 // CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP15]], 1 342 // CHECK1-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4 343 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] 344 // CHECK1: omp.inner.for.end: 345 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]]) 346 // CHECK1-NEXT: call void 
@_ZN1SIfED1Ev(%struct.S* noundef [[VAR]]) #[[ATTR2]] 347 // CHECK1-NEXT: [[ARRAY_BEGIN4:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0 348 // CHECK1-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN4]], i64 2 349 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 350 // CHECK1: arraydestroy.body: 351 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP16]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 352 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 353 // CHECK1-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 354 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN4]] 355 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE5:%.*]], label [[ARRAYDESTROY_BODY]] 356 // CHECK1: arraydestroy.done5: 357 // CHECK1-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v() 358 // CHECK1-NEXT: ret i32 [[CALL]] 359 // 360 // 361 // CHECK1-LABEL: define {{[^@]+}}@_ZN2StC1Ev 362 // CHECK1-SAME: (%struct.St* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 363 // CHECK1-NEXT: entry: 364 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8 365 // CHECK1-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8 366 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8 367 // CHECK1-NEXT: call void @_ZN2StC2Ev(%struct.St* noundef [[THIS1]]) 368 // CHECK1-NEXT: ret void 369 // 370 // 371 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1ERKS0_2St 372 // CHECK1-SAME: (%struct.S* noundef [[THIS:%.*]], %struct.S* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 373 // CHECK1-NEXT: entry: 374 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 375 // CHECK1-NEXT: [[S_ADDR:%.*]] = alloca %struct.S*, align 8 376 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 377 // CHECK1-NEXT: store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8 378 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 379 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8 380 // CHECK1-NEXT: call void @_ZN1SIfEC2ERKS0_2St(%struct.S* noundef [[THIS1]], %struct.S* noundef nonnull align 4 dereferenceable(4) [[TMP0]], %struct.St* noundef [[T]]) 381 // CHECK1-NEXT: ret void 382 // 383 // 384 // CHECK1-LABEL: define {{[^@]+}}@_ZN2StD1Ev 385 // CHECK1-SAME: (%struct.St* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 386 // CHECK1-NEXT: entry: 387 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8 388 // CHECK1-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8 389 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8 390 // CHECK1-NEXT: call void @_ZN2StD2Ev(%struct.St* noundef [[THIS1]]) #[[ATTR2]] 391 // CHECK1-NEXT: ret void 392 // 393 // 394 // CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIiET_v 395 // CHECK1-SAME: () #[[ATTR5:[0-9]+]] { 396 // CHECK1-NEXT: entry: 397 // CHECK1-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 398 // CHECK1-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4 399 // CHECK1-NEXT: [[T_VAR:%.*]] = alloca i32, align 4 400 // CHECK1-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4 401 // CHECK1-NEXT: [[S_ARR:%.*]] = 
alloca [2 x %struct.S.0], align 4 402 // CHECK1-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S_0]], align 4 403 // CHECK1-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* noundef [[TEST]]) 404 // CHECK1-NEXT: store i32 0, i32* [[T_VAR]], align 4 405 // CHECK1-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8* 406 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false) 407 // CHECK1-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0 408 // CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef [[ARRAYINIT_BEGIN]], i32 noundef 1) 409 // CHECK1-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1 410 // CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef [[ARRAYINIT_ELEMENT]], i32 noundef 2) 411 // CHECK1-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef [[VAR]], i32 noundef 3) 412 // CHECK1-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, [2 x i32]*, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* [[T_VAR]], [2 x i32]* [[VEC]], [2 x %struct.S.0]* [[S_ARR]], %struct.S.0* [[VAR]]) 413 // CHECK1-NEXT: store i32 0, i32* [[RETVAL]], align 4 414 // CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef [[VAR]]) #[[ATTR2]] 415 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0 416 // CHECK1-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2 417 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 418 // CHECK1: arraydestroy.body: 419 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP1]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 420 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 421 // CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 422 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]] 423 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]] 424 // CHECK1: arraydestroy.done1: 425 // CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef [[TEST]]) #[[ATTR2]] 426 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[RETVAL]], align 4 427 // CHECK1-NEXT: ret i32 [[TMP2]] 428 // 429 // 430 // CHECK1-LABEL: define {{[^@]+}}@_ZN2StC2Ev 431 // CHECK1-SAME: (%struct.St* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 432 // CHECK1-NEXT: entry: 433 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8 434 // CHECK1-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8 435 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8 436 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[THIS1]], i32 0, i32 0 437 // CHECK1-NEXT: store i32 0, i32* [[A]], align 4 438 // CHECK1-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[THIS1]], i32 0, i32 1 439 // CHECK1-NEXT: store i32 0, i32* [[B]], align 4 440 // CHECK1-NEXT: ret void 441 // 442 // 443 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2ERKS0_2St 444 
// CHECK1-SAME: (%struct.S* noundef [[THIS:%.*]], %struct.S* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 445 // CHECK1-NEXT: entry: 446 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 447 // CHECK1-NEXT: [[S_ADDR:%.*]] = alloca %struct.S*, align 8 448 // CHECK1-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 449 // CHECK1-NEXT: store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8 450 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 451 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 452 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8 453 // CHECK1-NEXT: [[F2:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 0 454 // CHECK1-NEXT: [[TMP1:%.*]] = load float, float* [[F2]], align 4 455 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[T]], i32 0, i32 0 456 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 457 // CHECK1-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to float 458 // CHECK1-NEXT: [[ADD:%.*]] = fadd float [[TMP1]], [[CONV]] 459 // CHECK1-NEXT: store float [[ADD]], float* [[F]], align 4 460 // CHECK1-NEXT: ret void 461 // 462 // 463 // CHECK1-LABEL: define {{[^@]+}}@_ZN2StD2Ev 464 // CHECK1-SAME: (%struct.St* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 465 // CHECK1-NEXT: entry: 466 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8 467 // CHECK1-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8 468 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8 469 // CHECK1-NEXT: ret void 470 // 471 // 472 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev 473 // CHECK1-SAME: (%struct.S.0* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 474 // CHECK1-NEXT: entry: 475 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 476 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 477 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 478 // CHECK1-NEXT: call void @_ZN1SIiEC2Ev(%struct.S.0* noundef [[THIS1]]) 479 // CHECK1-NEXT: ret void 480 // 481 // 482 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei 483 // CHECK1-SAME: (%struct.S.0* noundef [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 484 // CHECK1-NEXT: entry: 485 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 486 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 487 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 488 // CHECK1-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 489 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 490 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 491 // CHECK1-NEXT: call void @_ZN1SIiEC2Ei(%struct.S.0* noundef [[THIS1]], i32 noundef [[TMP0]]) 492 // CHECK1-NEXT: ret void 493 // 494 // 495 // CHECK1-LABEL: define {{[^@]+}}@.omp_outlined. 
496 // CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], [2 x i32]* noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]], [2 x %struct.S.0]* noundef nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR6:[0-9]+]] { 497 // CHECK1-NEXT: entry: 498 // CHECK1-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 499 // CHECK1-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 500 // CHECK1-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32*, align 8 501 // CHECK1-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8 502 // CHECK1-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8 503 // CHECK1-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8 504 // CHECK1-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4 505 // CHECK1-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4 506 // CHECK1-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4 507 // CHECK1-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4 508 // CHECK1-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4 509 // CHECK1-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4 510 // CHECK1-NEXT: [[VEC2:%.*]] = alloca [2 x i32], align 4 511 // CHECK1-NEXT: [[S_ARR3:%.*]] = alloca [2 x %struct.S.0], align 4 512 // CHECK1-NEXT: [[AGG_TMP:%.*]] = alloca [[STRUCT_ST:%.*]], align 4 513 // CHECK1-NEXT: [[VAR5:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4 514 // CHECK1-NEXT: [[AGG_TMP6:%.*]] = alloca [[STRUCT_ST]], align 4 515 // CHECK1-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 516 // CHECK1-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 517 // CHECK1-NEXT: store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8 518 // CHECK1-NEXT: store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8 519 // CHECK1-NEXT: store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8 520 // CHECK1-NEXT: store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8 521 // CHECK1-NEXT: [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8 522 // CHECK1-NEXT: [[TMP1:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8 523 // CHECK1-NEXT: [[TMP2:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8 524 // CHECK1-NEXT: [[TMP3:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8 525 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4 526 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4 527 // CHECK1-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4 528 // CHECK1-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4 529 // CHECK1-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4 530 // CHECK1-NEXT: store i32 [[TMP4]], i32* [[T_VAR1]], align 4 531 // CHECK1-NEXT: [[TMP5:%.*]] = bitcast [2 x i32]* [[VEC2]] to i8* 532 // CHECK1-NEXT: [[TMP6:%.*]] = bitcast [2 x i32]* [[TMP1]] to i8* 533 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP5]], i8* align 4 [[TMP6]], i64 8, i1 false) 534 // CHECK1-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR3]], i32 0, i32 0 535 // CHECK1-NEXT: [[TMP7:%.*]] = bitcast [2 x %struct.S.0]* [[TMP2]] to %struct.S.0* 536 // CHECK1-NEXT: [[TMP8:%.*]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2 537 // CHECK1-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN]], [[TMP8]] 
538 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE4:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 539 // CHECK1: omp.arraycpy.body: 540 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP7]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 541 // CHECK1-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 542 // CHECK1-NEXT: call void @_ZN2StC1Ev(%struct.St* noundef [[AGG_TMP]]) 543 // CHECK1-NEXT: call void @_ZN1SIiEC1ERKS0_2St(%struct.S.0* noundef [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]], %struct.St* noundef [[AGG_TMP]]) 544 // CHECK1-NEXT: call void @_ZN2StD1Ev(%struct.St* noundef [[AGG_TMP]]) #[[ATTR2]] 545 // CHECK1-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 546 // CHECK1-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 547 // CHECK1-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP8]] 548 // CHECK1-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE4]], label [[OMP_ARRAYCPY_BODY]] 549 // CHECK1: omp.arraycpy.done4: 550 // CHECK1-NEXT: call void @_ZN2StC1Ev(%struct.St* noundef [[AGG_TMP6]]) 551 // CHECK1-NEXT: call void @_ZN1SIiEC1ERKS0_2St(%struct.S.0* noundef [[VAR5]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP3]], %struct.St* noundef [[AGG_TMP6]]) 552 // CHECK1-NEXT: call void @_ZN2StD1Ev(%struct.St* noundef [[AGG_TMP6]]) #[[ATTR2]] 553 // CHECK1-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 554 // CHECK1-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 555 // CHECK1-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1) 556 // CHECK1-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4 557 // CHECK1-NEXT: [[TMP12:%.*]] = icmp slt i32 [[TMP11]], 1 558 // CHECK1-NEXT: [[TMP13:%.*]] = select i1 [[TMP12]], i32 [[TMP11]], i32 1 559 // CHECK1-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_SECTIONS_UB_]], align 4 560 // CHECK1-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4 561 // CHECK1-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_SECTIONS_IV_]], align 4 562 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 563 // CHECK1: omp.inner.for.cond: 564 // CHECK1-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4 565 // CHECK1-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4 566 // CHECK1-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]] 567 // CHECK1-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 568 // CHECK1: omp.inner.for.body: 569 // CHECK1-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4 570 // CHECK1-NEXT: switch i32 [[TMP17]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [ 571 // CHECK1-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]] 572 // CHECK1-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE7:%.*]] 573 // CHECK1-NEXT: ] 574 // CHECK1: .omp.sections.case: 575 // CHECK1-NEXT: [[TMP18:%.*]] = load i32, i32* [[T_VAR1]], align 4 576 // CHECK1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 
x i32]* [[VEC2]], i64 0, i64 0 577 // CHECK1-NEXT: store i32 [[TMP18]], i32* [[ARRAYIDX]], align 4 578 // CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT]] 579 // CHECK1: .omp.sections.case7: 580 // CHECK1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR3]], i64 0, i64 0 581 // CHECK1-NEXT: [[TMP19:%.*]] = bitcast %struct.S.0* [[ARRAYIDX8]] to i8* 582 // CHECK1-NEXT: [[TMP20:%.*]] = bitcast %struct.S.0* [[VAR5]] to i8* 583 // CHECK1-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP19]], i8* align 4 [[TMP20]], i64 4, i1 false) 584 // CHECK1-NEXT: br label [[DOTOMP_SECTIONS_EXIT]] 585 // CHECK1: .omp.sections.exit: 586 // CHECK1-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 587 // CHECK1: omp.inner.for.inc: 588 // CHECK1-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4 589 // CHECK1-NEXT: [[INC:%.*]] = add nsw i32 [[TMP21]], 1 590 // CHECK1-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4 591 // CHECK1-NEXT: br label [[OMP_INNER_FOR_COND]] 592 // CHECK1: omp.inner.for.end: 593 // CHECK1-NEXT: [[TMP22:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 594 // CHECK1-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP22]], align 4 595 // CHECK1-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP23]]) 596 // CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef [[VAR5]]) #[[ATTR2]] 597 // CHECK1-NEXT: [[ARRAY_BEGIN9:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR3]], i32 0, i32 0 598 // CHECK1-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN9]], i64 2 599 // CHECK1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 600 // CHECK1: arraydestroy.body: 601 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP24]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 602 // CHECK1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 603 // CHECK1-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 604 // CHECK1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN9]] 605 // CHECK1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE10:%.*]], label [[ARRAYDESTROY_BODY]] 606 // CHECK1: arraydestroy.done10: 607 // CHECK1-NEXT: [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 608 // CHECK1-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4 609 // CHECK1-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP26]]) 610 // CHECK1-NEXT: ret void 611 // 612 // 613 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1ERKS0_2St 614 // CHECK1-SAME: (%struct.S.0* noundef [[THIS:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 615 // CHECK1-NEXT: entry: 616 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 617 // CHECK1-NEXT: [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8 618 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 619 // CHECK1-NEXT: store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8 620 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 621 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8 622 // CHECK1-NEXT: call void @_ZN1SIiEC2ERKS0_2St(%struct.S.0* noundef [[THIS1]], %struct.S.0* 
noundef nonnull align 4 dereferenceable(4) [[TMP0]], %struct.St* noundef [[T]]) 623 // CHECK1-NEXT: ret void 624 // 625 // 626 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev 627 // CHECK1-SAME: (%struct.S.0* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 628 // CHECK1-NEXT: entry: 629 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 630 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 631 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 632 // CHECK1-NEXT: call void @_ZN1SIiED2Ev(%struct.S.0* noundef [[THIS1]]) #[[ATTR2]] 633 // CHECK1-NEXT: ret void 634 // 635 // 636 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev 637 // CHECK1-SAME: (%struct.S.0* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 638 // CHECK1-NEXT: entry: 639 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 640 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 641 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 642 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0 643 // CHECK1-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4 644 // CHECK1-NEXT: store i32 [[TMP0]], i32* [[F]], align 4 645 // CHECK1-NEXT: ret void 646 // 647 // 648 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei 649 // CHECK1-SAME: (%struct.S.0* noundef [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 650 // CHECK1-NEXT: entry: 651 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 652 // CHECK1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 653 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 654 // CHECK1-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 655 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 656 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0 657 // CHECK1-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 658 // CHECK1-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4 659 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[TMP1]] 660 // CHECK1-NEXT: store i32 [[ADD]], i32* [[F]], align 4 661 // CHECK1-NEXT: ret void 662 // 663 // 664 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2ERKS0_2St 665 // CHECK1-SAME: (%struct.S.0* noundef [[THIS:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 666 // CHECK1-NEXT: entry: 667 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 668 // CHECK1-NEXT: [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8 669 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 670 // CHECK1-NEXT: store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8 671 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 672 // CHECK1-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0 673 // CHECK1-NEXT: [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8 674 // CHECK1-NEXT: [[F2:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[TMP0]], i32 0, i32 0 675 // CHECK1-NEXT: [[TMP1:%.*]] = load i32, i32* [[F2]], align 4 676 // CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[T]], i32 0, i32 0 677 // CHECK1-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 
4 678 // CHECK1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP2]] 679 // CHECK1-NEXT: store i32 [[ADD]], i32* [[F]], align 4 680 // CHECK1-NEXT: ret void 681 // 682 // 683 // CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev 684 // CHECK1-SAME: (%struct.S.0* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 685 // CHECK1-NEXT: entry: 686 // CHECK1-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 687 // CHECK1-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 688 // CHECK1-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 689 // CHECK1-NEXT: ret void 690 // 691 // 692 // CHECK1-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_sections_firstprivate_codegen.cpp 693 // CHECK1-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 694 // CHECK1-NEXT: entry: 695 // CHECK1-NEXT: call void @__cxx_global_var_init() 696 // CHECK1-NEXT: call void @__cxx_global_var_init.1() 697 // CHECK1-NEXT: call void @__cxx_global_var_init.2() 698 // CHECK1-NEXT: ret void 699 // 700 // 701 // CHECK2-LABEL: define {{[^@]+}}@__cxx_global_var_init 702 // CHECK2-SAME: () #[[ATTR0:[0-9]+]] section "__TEXT,__StaticInit,regular,pure_instructions" { 703 // CHECK2-NEXT: entry: 704 // CHECK2-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef @test) 705 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]] 706 // CHECK2-NEXT: ret void 707 // 708 // 709 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev 710 // CHECK2-SAME: (%struct.S* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 { 711 // CHECK2-NEXT: entry: 712 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 713 // CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 714 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 715 // CHECK2-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* noundef [[THIS1]]) 716 // CHECK2-NEXT: ret void 717 // 718 // 719 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev 720 // CHECK2-SAME: (%struct.S* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 721 // CHECK2-NEXT: entry: 722 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 723 // CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 724 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 725 // CHECK2-NEXT: call void @_ZN1SIfED2Ev(%struct.S* noundef [[THIS1]]) #[[ATTR2]] 726 // CHECK2-NEXT: ret void 727 // 728 // 729 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev 730 // CHECK2-SAME: (%struct.S* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 731 // CHECK2-NEXT: entry: 732 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 733 // CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 734 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 735 // CHECK2-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 736 // CHECK2-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4 737 // CHECK2-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float 738 // CHECK2-NEXT: store float [[CONV]], float* [[F]], align 4 739 // CHECK2-NEXT: ret void 740 // 741 // 742 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev 743 // CHECK2-SAME: (%struct.S* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 744 // CHECK2-NEXT: entry: 745 // CHECK2-NEXT: 
[[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 746 // CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 747 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 748 // CHECK2-NEXT: ret void 749 // 750 // 751 // CHECK2-LABEL: define {{[^@]+}}@__cxx_global_var_init.1 752 // CHECK2-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 753 // CHECK2-NEXT: entry: 754 // CHECK2-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float noundef 1.000000e+00) 755 // CHECK2-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float noundef 2.000000e+00) 756 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]] 757 // CHECK2-NEXT: ret void 758 // 759 // 760 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef 761 // CHECK2-SAME: (%struct.S* noundef [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 762 // CHECK2-NEXT: entry: 763 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 764 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float, align 4 765 // CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 766 // CHECK2-NEXT: store float [[A]], float* [[A_ADDR]], align 4 767 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 768 // CHECK2-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4 769 // CHECK2-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* noundef [[THIS1]], float noundef [[TMP0]]) 770 // CHECK2-NEXT: ret void 771 // 772 // 773 // CHECK2-LABEL: define {{[^@]+}}@__cxx_global_array_dtor 774 // CHECK2-SAME: (i8* noundef [[TMP0:%.*]]) #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 775 // CHECK2-NEXT: entry: 776 // CHECK2-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 777 // CHECK2-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 778 // CHECK2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 779 // CHECK2: arraydestroy.body: 780 // CHECK2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 781 // CHECK2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 782 // CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 783 // CHECK2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0) 784 // CHECK2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]] 785 // CHECK2: arraydestroy.done1: 786 // CHECK2-NEXT: ret void 787 // 788 // 789 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef 790 // CHECK2-SAME: (%struct.S* noundef [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 791 // CHECK2-NEXT: entry: 792 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 793 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca float, align 4 794 // CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 795 // CHECK2-NEXT: store float [[A]], float* [[A_ADDR]], align 4 796 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, 
%struct.S** [[THIS_ADDR]], align 8 797 // CHECK2-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 798 // CHECK2-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4 799 // CHECK2-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4 800 // CHECK2-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to float 801 // CHECK2-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]] 802 // CHECK2-NEXT: store float [[ADD]], float* [[F]], align 4 803 // CHECK2-NEXT: ret void 804 // 805 // 806 // CHECK2-LABEL: define {{[^@]+}}@__cxx_global_var_init.2 807 // CHECK2-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 808 // CHECK2-NEXT: entry: 809 // CHECK2-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef @var, float noundef 3.000000e+00) 810 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]] 811 // CHECK2-NEXT: ret void 812 // 813 // 814 // CHECK2-LABEL: define {{[^@]+}}@main 815 // CHECK2-SAME: () #[[ATTR3:[0-9]+]] { 816 // CHECK2-NEXT: entry: 817 // CHECK2-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 818 // CHECK2-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4 819 // CHECK2-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4 820 // CHECK2-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4 821 // CHECK2-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4 822 // CHECK2-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4 823 // CHECK2-NEXT: [[T_VAR:%.*]] = alloca i32, align 4 824 // CHECK2-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4 825 // CHECK2-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S], align 4 826 // CHECK2-NEXT: [[AGG_TMP:%.*]] = alloca [[STRUCT_ST:%.*]], align 4 827 // CHECK2-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S:%.*]], align 4 828 // CHECK2-NEXT: [[AGG_TMP2:%.*]] = alloca [[STRUCT_ST]], align 4 829 // CHECK2-NEXT: [[SIVAR:%.*]] = alloca i32, align 4 830 // CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB2:[0-9]+]]) 831 // CHECK2-NEXT: store i32 0, i32* [[RETVAL]], align 4 832 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4 833 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_UB_]], align 4 834 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4 835 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4 836 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* @t_var, align 4 837 // CHECK2-NEXT: store i32 [[TMP1]], i32* [[T_VAR]], align 4 838 // CHECK2-NEXT: [[TMP2:%.*]] = bitcast [2 x i32]* [[VEC]] to i8* 839 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP2]], i8* align 4 bitcast ([2 x i32]* @vec to i8*), i64 8, i1 false) 840 // CHECK2-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0 841 // CHECK2-NEXT: [[TMP3:%.*]] = getelementptr [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2 842 // CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN]], [[TMP3]] 843 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE1:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 844 // CHECK2: omp.arraycpy.body: 845 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 846 // CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] 
= phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 847 // CHECK2-NEXT: call void @_ZN2StC1Ev(%struct.St* noundef [[AGG_TMP]]) 848 // CHECK2-NEXT: call void @_ZN1SIfEC1ERKS0_2St(%struct.S* noundef [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S* noundef nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]], %struct.St* noundef [[AGG_TMP]]) 849 // CHECK2-NEXT: call void @_ZN2StD1Ev(%struct.St* noundef [[AGG_TMP]]) #[[ATTR2]] 850 // CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 851 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 852 // CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP3]] 853 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE1]], label [[OMP_ARRAYCPY_BODY]] 854 // CHECK2: omp.arraycpy.done1: 855 // CHECK2-NEXT: call void @_ZN2StC1Ev(%struct.St* noundef [[AGG_TMP2]]) 856 // CHECK2-NEXT: call void @_ZN1SIfEC1ERKS0_2St(%struct.S* noundef [[VAR]], %struct.S* noundef nonnull align 4 dereferenceable(4) @var, %struct.St* noundef [[AGG_TMP2]]) 857 // CHECK2-NEXT: call void @_ZN2StD1Ev(%struct.St* noundef [[AGG_TMP2]]) #[[ATTR2]] 858 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4 859 // CHECK2-NEXT: store i32 [[TMP4]], i32* [[SIVAR]], align 4 860 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP0]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1) 861 // CHECK2-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4 862 // CHECK2-NEXT: [[TMP6:%.*]] = icmp slt i32 [[TMP5]], 0 863 // CHECK2-NEXT: [[TMP7:%.*]] = select i1 [[TMP6]], i32 [[TMP5]], i32 0 864 // CHECK2-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_SECTIONS_UB_]], align 4 865 // CHECK2-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4 866 // CHECK2-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_SECTIONS_IV_]], align 4 867 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 868 // CHECK2: omp.inner.for.cond: 869 // CHECK2-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4 870 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4 871 // CHECK2-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] 872 // CHECK2-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 873 // CHECK2: omp.inner.for.body: 874 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4 875 // CHECK2-NEXT: switch i32 [[TMP11]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [ 876 // CHECK2-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]] 877 // CHECK2-NEXT: ] 878 // CHECK2: .omp.sections.case: 879 // CHECK2-NEXT: [[TMP12:%.*]] = load i32, i32* [[T_VAR]], align 4 880 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 0 881 // CHECK2-NEXT: store i32 [[TMP12]], i32* [[ARRAYIDX]], align 4 882 // CHECK2-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0 883 // CHECK2-NEXT: [[TMP13:%.*]] = bitcast %struct.S* [[ARRAYIDX3]] to i8* 884 // CHECK2-NEXT: [[TMP14:%.*]] = bitcast %struct.S* [[VAR]] to i8* 885 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP13]], i8* align 4 [[TMP14]], i64 4, i1 
false) 886 // CHECK2-NEXT: store i32 31, i32* [[SIVAR]], align 4 887 // CHECK2-NEXT: br label [[DOTOMP_SECTIONS_EXIT]] 888 // CHECK2: .omp.sections.exit: 889 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 890 // CHECK2: omp.inner.for.inc: 891 // CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4 892 // CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP15]], 1 893 // CHECK2-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4 894 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]] 895 // CHECK2: omp.inner.for.end: 896 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]]) 897 // CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef [[VAR]]) #[[ATTR2]] 898 // CHECK2-NEXT: [[ARRAY_BEGIN4:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0 899 // CHECK2-NEXT: [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN4]], i64 2 900 // CHECK2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 901 // CHECK2: arraydestroy.body: 902 // CHECK2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP16]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 903 // CHECK2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 904 // CHECK2-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 905 // CHECK2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN4]] 906 // CHECK2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE5:%.*]], label [[ARRAYDESTROY_BODY]] 907 // CHECK2: arraydestroy.done5: 908 // CHECK2-NEXT: [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v() 909 // CHECK2-NEXT: ret i32 [[CALL]] 910 // 911 // 912 // CHECK2-LABEL: define {{[^@]+}}@_ZN2StC1Ev 913 // CHECK2-SAME: (%struct.St* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 914 // CHECK2-NEXT: entry: 915 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8 916 // CHECK2-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8 917 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8 918 // CHECK2-NEXT: call void @_ZN2StC2Ev(%struct.St* noundef [[THIS1]]) 919 // CHECK2-NEXT: ret void 920 // 921 // 922 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC1ERKS0_2St 923 // CHECK2-SAME: (%struct.S* noundef [[THIS:%.*]], %struct.S* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 924 // CHECK2-NEXT: entry: 925 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 926 // CHECK2-NEXT: [[S_ADDR:%.*]] = alloca %struct.S*, align 8 927 // CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 928 // CHECK2-NEXT: store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8 929 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 930 // CHECK2-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8 931 // CHECK2-NEXT: call void @_ZN1SIfEC2ERKS0_2St(%struct.S* noundef [[THIS1]], %struct.S* noundef nonnull align 4 dereferenceable(4) [[TMP0]], %struct.St* noundef [[T]]) 932 // CHECK2-NEXT: ret void 933 // 934 // 935 // CHECK2-LABEL: define {{[^@]+}}@_ZN2StD1Ev 936 // CHECK2-SAME: (%struct.St* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 937 // CHECK2-NEXT: entry: 938 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8 939 // CHECK2-NEXT: store 
%struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8 940 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8 941 // CHECK2-NEXT: call void @_ZN2StD2Ev(%struct.St* noundef [[THIS1]]) #[[ATTR2]] 942 // CHECK2-NEXT: ret void 943 // 944 // 945 // CHECK2-LABEL: define {{[^@]+}}@_Z5tmainIiET_v 946 // CHECK2-SAME: () #[[ATTR5:[0-9]+]] { 947 // CHECK2-NEXT: entry: 948 // CHECK2-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 949 // CHECK2-NEXT: [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4 950 // CHECK2-NEXT: [[T_VAR:%.*]] = alloca i32, align 4 951 // CHECK2-NEXT: [[VEC:%.*]] = alloca [2 x i32], align 4 952 // CHECK2-NEXT: [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4 953 // CHECK2-NEXT: [[VAR:%.*]] = alloca [[STRUCT_S_0]], align 4 954 // CHECK2-NEXT: call void @_ZN1SIiEC1Ev(%struct.S.0* noundef [[TEST]]) 955 // CHECK2-NEXT: store i32 0, i32* [[T_VAR]], align 4 956 // CHECK2-NEXT: [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8* 957 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false) 958 // CHECK2-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0 959 // CHECK2-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef [[ARRAYINIT_BEGIN]], i32 noundef 1) 960 // CHECK2-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1 961 // CHECK2-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef [[ARRAYINIT_ELEMENT]], i32 noundef 2) 962 // CHECK2-NEXT: call void @_ZN1SIiEC1Ei(%struct.S.0* noundef [[VAR]], i32 noundef 3) 963 // CHECK2-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB2]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*, [2 x i32]*, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined. 
to void (i32*, i32*, ...)*), i32* [[T_VAR]], [2 x i32]* [[VEC]], [2 x %struct.S.0]* [[S_ARR]], %struct.S.0* [[VAR]]) 964 // CHECK2-NEXT: store i32 0, i32* [[RETVAL]], align 4 965 // CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef [[VAR]]) #[[ATTR2]] 966 // CHECK2-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0 967 // CHECK2-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2 968 // CHECK2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 969 // CHECK2: arraydestroy.body: 970 // CHECK2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP1]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 971 // CHECK2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 972 // CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 973 // CHECK2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]] 974 // CHECK2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]] 975 // CHECK2: arraydestroy.done1: 976 // CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef [[TEST]]) #[[ATTR2]] 977 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[RETVAL]], align 4 978 // CHECK2-NEXT: ret i32 [[TMP2]] 979 // 980 // 981 // CHECK2-LABEL: define {{[^@]+}}@_ZN2StC2Ev 982 // CHECK2-SAME: (%struct.St* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 983 // CHECK2-NEXT: entry: 984 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8 985 // CHECK2-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8 986 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8 987 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[THIS1]], i32 0, i32 0 988 // CHECK2-NEXT: store i32 0, i32* [[A]], align 4 989 // CHECK2-NEXT: [[B:%.*]] = getelementptr inbounds [[STRUCT_ST]], %struct.St* [[THIS1]], i32 0, i32 1 990 // CHECK2-NEXT: store i32 0, i32* [[B]], align 4 991 // CHECK2-NEXT: ret void 992 // 993 // 994 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIfEC2ERKS0_2St 995 // CHECK2-SAME: (%struct.S* noundef [[THIS:%.*]], %struct.S* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 996 // CHECK2-NEXT: entry: 997 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 998 // CHECK2-NEXT: [[S_ADDR:%.*]] = alloca %struct.S*, align 8 999 // CHECK2-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1000 // CHECK2-NEXT: store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8 1001 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1002 // CHECK2-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 1003 // CHECK2-NEXT: [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8 1004 // CHECK2-NEXT: [[F2:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 0 1005 // CHECK2-NEXT: [[TMP1:%.*]] = load float, float* [[F2]], align 4 1006 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[T]], i32 0, i32 0 1007 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 1008 // CHECK2-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP2]] to float 1009 // CHECK2-NEXT: [[ADD:%.*]] = fadd float [[TMP1]], [[CONV]] 1010 // 
CHECK2-NEXT: store float [[ADD]], float* [[F]], align 4 1011 // CHECK2-NEXT: ret void 1012 // 1013 // 1014 // CHECK2-LABEL: define {{[^@]+}}@_ZN2StD2Ev 1015 // CHECK2-SAME: (%struct.St* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1016 // CHECK2-NEXT: entry: 1017 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.St*, align 8 1018 // CHECK2-NEXT: store %struct.St* [[THIS]], %struct.St** [[THIS_ADDR]], align 8 1019 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.St*, %struct.St** [[THIS_ADDR]], align 8 1020 // CHECK2-NEXT: ret void 1021 // 1022 // 1023 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev 1024 // CHECK2-SAME: (%struct.S.0* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1025 // CHECK2-NEXT: entry: 1026 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 1027 // CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 1028 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 1029 // CHECK2-NEXT: call void @_ZN1SIiEC2Ev(%struct.S.0* noundef [[THIS1]]) 1030 // CHECK2-NEXT: ret void 1031 // 1032 // 1033 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei 1034 // CHECK2-SAME: (%struct.S.0* noundef [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1035 // CHECK2-NEXT: entry: 1036 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 1037 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 1038 // CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 1039 // CHECK2-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 1040 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 1041 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 1042 // CHECK2-NEXT: call void @_ZN1SIiEC2Ei(%struct.S.0* noundef [[THIS1]], i32 noundef [[TMP0]]) 1043 // CHECK2-NEXT: ret void 1044 // 1045 // 1046 // CHECK2-LABEL: define {{[^@]+}}@.omp_outlined. 
1047 // CHECK2-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[T_VAR:%.*]], [2 x i32]* noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]], [2 x %struct.S.0]* noundef nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR6:[0-9]+]] { 1048 // CHECK2-NEXT: entry: 1049 // CHECK2-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1050 // CHECK2-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1051 // CHECK2-NEXT: [[T_VAR_ADDR:%.*]] = alloca i32*, align 8 1052 // CHECK2-NEXT: [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8 1053 // CHECK2-NEXT: [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8 1054 // CHECK2-NEXT: [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8 1055 // CHECK2-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4 1056 // CHECK2-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4 1057 // CHECK2-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4 1058 // CHECK2-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4 1059 // CHECK2-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4 1060 // CHECK2-NEXT: [[T_VAR1:%.*]] = alloca i32, align 4 1061 // CHECK2-NEXT: [[VEC2:%.*]] = alloca [2 x i32], align 4 1062 // CHECK2-NEXT: [[S_ARR3:%.*]] = alloca [2 x %struct.S.0], align 4 1063 // CHECK2-NEXT: [[AGG_TMP:%.*]] = alloca [[STRUCT_ST:%.*]], align 4 1064 // CHECK2-NEXT: [[VAR5:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4 1065 // CHECK2-NEXT: [[AGG_TMP6:%.*]] = alloca [[STRUCT_ST]], align 4 1066 // CHECK2-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1067 // CHECK2-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1068 // CHECK2-NEXT: store i32* [[T_VAR]], i32** [[T_VAR_ADDR]], align 8 1069 // CHECK2-NEXT: store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8 1070 // CHECK2-NEXT: store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8 1071 // CHECK2-NEXT: store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8 1072 // CHECK2-NEXT: [[TMP0:%.*]] = load i32*, i32** [[T_VAR_ADDR]], align 8 1073 // CHECK2-NEXT: [[TMP1:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8 1074 // CHECK2-NEXT: [[TMP2:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8 1075 // CHECK2-NEXT: [[TMP3:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8 1076 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4 1077 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4 1078 // CHECK2-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4 1079 // CHECK2-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4 1080 // CHECK2-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP0]], align 4 1081 // CHECK2-NEXT: store i32 [[TMP4]], i32* [[T_VAR1]], align 4 1082 // CHECK2-NEXT: [[TMP5:%.*]] = bitcast [2 x i32]* [[VEC2]] to i8* 1083 // CHECK2-NEXT: [[TMP6:%.*]] = bitcast [2 x i32]* [[TMP1]] to i8* 1084 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP5]], i8* align 4 [[TMP6]], i64 8, i1 false) 1085 // CHECK2-NEXT: [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR3]], i32 0, i32 0 1086 // CHECK2-NEXT: [[TMP7:%.*]] = bitcast [2 x %struct.S.0]* [[TMP2]] to %struct.S.0* 1087 // CHECK2-NEXT: [[TMP8:%.*]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2 1088 // CHECK2-NEXT: [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp 
eq %struct.S.0* [[ARRAY_BEGIN]], [[TMP8]] 1089 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE4:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]] 1090 // CHECK2: omp.arraycpy.body: 1091 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP7]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 1092 // CHECK2-NEXT: [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ] 1093 // CHECK2-NEXT: call void @_ZN2StC1Ev(%struct.St* noundef [[AGG_TMP]]) 1094 // CHECK2-NEXT: call void @_ZN1SIiEC1ERKS0_2St(%struct.S.0* noundef [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]], %struct.St* noundef [[AGG_TMP]]) 1095 // CHECK2-NEXT: call void @_ZN2StD1Ev(%struct.St* noundef [[AGG_TMP]]) #[[ATTR2]] 1096 // CHECK2-NEXT: [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1 1097 // CHECK2-NEXT: [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1 1098 // CHECK2-NEXT: [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP8]] 1099 // CHECK2-NEXT: br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE4]], label [[OMP_ARRAYCPY_BODY]] 1100 // CHECK2: omp.arraycpy.done4: 1101 // CHECK2-NEXT: call void @_ZN2StC1Ev(%struct.St* noundef [[AGG_TMP6]]) 1102 // CHECK2-NEXT: call void @_ZN1SIiEC1ERKS0_2St(%struct.S.0* noundef [[VAR5]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP3]], %struct.St* noundef [[AGG_TMP6]]) 1103 // CHECK2-NEXT: call void @_ZN2StD1Ev(%struct.St* noundef [[AGG_TMP6]]) #[[ATTR2]] 1104 // CHECK2-NEXT: [[TMP9:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1105 // CHECK2-NEXT: [[TMP10:%.*]] = load i32, i32* [[TMP9]], align 4 1106 // CHECK2-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1]], i32 [[TMP10]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1) 1107 // CHECK2-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4 1108 // CHECK2-NEXT: [[TMP12:%.*]] = icmp slt i32 [[TMP11]], 1 1109 // CHECK2-NEXT: [[TMP13:%.*]] = select i1 [[TMP12]], i32 [[TMP11]], i32 1 1110 // CHECK2-NEXT: store i32 [[TMP13]], i32* [[DOTOMP_SECTIONS_UB_]], align 4 1111 // CHECK2-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4 1112 // CHECK2-NEXT: store i32 [[TMP14]], i32* [[DOTOMP_SECTIONS_IV_]], align 4 1113 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1114 // CHECK2: omp.inner.for.cond: 1115 // CHECK2-NEXT: [[TMP15:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4 1116 // CHECK2-NEXT: [[TMP16:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4 1117 // CHECK2-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP15]], [[TMP16]] 1118 // CHECK2-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1119 // CHECK2: omp.inner.for.body: 1120 // CHECK2-NEXT: [[TMP17:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4 1121 // CHECK2-NEXT: switch i32 [[TMP17]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [ 1122 // CHECK2-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]] 1123 // CHECK2-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE7:%.*]] 1124 // CHECK2-NEXT: ] 1125 // CHECK2: .omp.sections.case: 1126 // CHECK2-NEXT: [[TMP18:%.*]] = load i32, i32* [[T_VAR1]], 
align 4 1127 // CHECK2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC2]], i64 0, i64 0 1128 // CHECK2-NEXT: store i32 [[TMP18]], i32* [[ARRAYIDX]], align 4 1129 // CHECK2-NEXT: br label [[DOTOMP_SECTIONS_EXIT]] 1130 // CHECK2: .omp.sections.case7: 1131 // CHECK2-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR3]], i64 0, i64 0 1132 // CHECK2-NEXT: [[TMP19:%.*]] = bitcast %struct.S.0* [[ARRAYIDX8]] to i8* 1133 // CHECK2-NEXT: [[TMP20:%.*]] = bitcast %struct.S.0* [[VAR5]] to i8* 1134 // CHECK2-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP19]], i8* align 4 [[TMP20]], i64 4, i1 false) 1135 // CHECK2-NEXT: br label [[DOTOMP_SECTIONS_EXIT]] 1136 // CHECK2: .omp.sections.exit: 1137 // CHECK2-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1138 // CHECK2: omp.inner.for.inc: 1139 // CHECK2-NEXT: [[TMP21:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4 1140 // CHECK2-NEXT: [[INC:%.*]] = add nsw i32 [[TMP21]], 1 1141 // CHECK2-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4 1142 // CHECK2-NEXT: br label [[OMP_INNER_FOR_COND]] 1143 // CHECK2: omp.inner.for.end: 1144 // CHECK2-NEXT: [[TMP22:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1145 // CHECK2-NEXT: [[TMP23:%.*]] = load i32, i32* [[TMP22]], align 4 1146 // CHECK2-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP23]]) 1147 // CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef [[VAR5]]) #[[ATTR2]] 1148 // CHECK2-NEXT: [[ARRAY_BEGIN9:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR3]], i32 0, i32 0 1149 // CHECK2-NEXT: [[TMP24:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN9]], i64 2 1150 // CHECK2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 1151 // CHECK2: arraydestroy.body: 1152 // CHECK2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP24]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 1153 // CHECK2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 1154 // CHECK2-NEXT: call void @_ZN1SIiED1Ev(%struct.S.0* noundef [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 1155 // CHECK2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN9]] 1156 // CHECK2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE10:%.*]], label [[ARRAYDESTROY_BODY]] 1157 // CHECK2: arraydestroy.done10: 1158 // CHECK2-NEXT: [[TMP25:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1159 // CHECK2-NEXT: [[TMP26:%.*]] = load i32, i32* [[TMP25]], align 4 1160 // CHECK2-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 [[TMP26]]) 1161 // CHECK2-NEXT: ret void 1162 // 1163 // 1164 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC1ERKS0_2St 1165 // CHECK2-SAME: (%struct.S.0* noundef [[THIS:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1166 // CHECK2-NEXT: entry: 1167 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 1168 // CHECK2-NEXT: [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8 1169 // CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 1170 // CHECK2-NEXT: store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8 1171 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 1172 // CHECK2-NEXT: [[TMP0:%.*]] = load %struct.S.0*, 
%struct.S.0** [[S_ADDR]], align 8 1173 // CHECK2-NEXT: call void @_ZN1SIiEC2ERKS0_2St(%struct.S.0* noundef [[THIS1]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP0]], %struct.St* noundef [[T]]) 1174 // CHECK2-NEXT: ret void 1175 // 1176 // 1177 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev 1178 // CHECK2-SAME: (%struct.S.0* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1179 // CHECK2-NEXT: entry: 1180 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 1181 // CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 1182 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 1183 // CHECK2-NEXT: call void @_ZN1SIiED2Ev(%struct.S.0* noundef [[THIS1]]) #[[ATTR2]] 1184 // CHECK2-NEXT: ret void 1185 // 1186 // 1187 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev 1188 // CHECK2-SAME: (%struct.S.0* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1189 // CHECK2-NEXT: entry: 1190 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 1191 // CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 1192 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 1193 // CHECK2-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0 1194 // CHECK2-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4 1195 // CHECK2-NEXT: store i32 [[TMP0]], i32* [[F]], align 4 1196 // CHECK2-NEXT: ret void 1197 // 1198 // 1199 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei 1200 // CHECK2-SAME: (%struct.S.0* noundef [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1201 // CHECK2-NEXT: entry: 1202 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 1203 // CHECK2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4 1204 // CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 1205 // CHECK2-NEXT: store i32 [[A]], i32* [[A_ADDR]], align 4 1206 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 1207 // CHECK2-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0 1208 // CHECK2-NEXT: [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4 1209 // CHECK2-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4 1210 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP0]], [[TMP1]] 1211 // CHECK2-NEXT: store i32 [[ADD]], i32* [[F]], align 4 1212 // CHECK2-NEXT: ret void 1213 // 1214 // 1215 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiEC2ERKS0_2St 1216 // CHECK2-SAME: (%struct.S.0* noundef [[THIS:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], %struct.St* noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1217 // CHECK2-NEXT: entry: 1218 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 1219 // CHECK2-NEXT: [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8 1220 // CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 1221 // CHECK2-NEXT: store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8 1222 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 1223 // CHECK2-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0 1224 // CHECK2-NEXT: [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8 1225 // CHECK2-NEXT: [[F2:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[TMP0]], i32 0, i32 0 1226 // CHECK2-NEXT: [[TMP1:%.*]] = load i32, i32* 
[[F2]], align 4 1227 // CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_ST:%.*]], %struct.St* [[T]], i32 0, i32 0 1228 // CHECK2-NEXT: [[TMP2:%.*]] = load i32, i32* [[A]], align 4 1229 // CHECK2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP2]] 1230 // CHECK2-NEXT: store i32 [[ADD]], i32* [[F]], align 4 1231 // CHECK2-NEXT: ret void 1232 // 1233 // 1234 // CHECK2-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev 1235 // CHECK2-SAME: (%struct.S.0* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1236 // CHECK2-NEXT: entry: 1237 // CHECK2-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8 1238 // CHECK2-NEXT: store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8 1239 // CHECK2-NEXT: [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8 1240 // CHECK2-NEXT: ret void 1241 // 1242 // 1243 // CHECK2-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_sections_firstprivate_codegen.cpp 1244 // CHECK2-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1245 // CHECK2-NEXT: entry: 1246 // CHECK2-NEXT: call void @__cxx_global_var_init() 1247 // CHECK2-NEXT: call void @__cxx_global_var_init.1() 1248 // CHECK2-NEXT: call void @__cxx_global_var_init.2() 1249 // CHECK2-NEXT: ret void 1250 // 1251 // 1252 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_var_init 1253 // CHECK3-SAME: () #[[ATTR0:[0-9]+]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1254 // CHECK3-NEXT: entry: 1255 // CHECK3-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef @test) 1256 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]] 1257 // CHECK3-NEXT: ret void 1258 // 1259 // 1260 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev 1261 // CHECK3-SAME: (%struct.S* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 { 1262 // CHECK3-NEXT: entry: 1263 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1264 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1265 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1266 // CHECK3-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* noundef [[THIS1]]) 1267 // CHECK3-NEXT: ret void 1268 // 1269 // 1270 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev 1271 // CHECK3-SAME: (%struct.S* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1272 // CHECK3-NEXT: entry: 1273 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1274 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1275 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1276 // CHECK3-NEXT: call void @_ZN1SIfED2Ev(%struct.S* noundef [[THIS1]]) #[[ATTR2]] 1277 // CHECK3-NEXT: ret void 1278 // 1279 // 1280 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev 1281 // CHECK3-SAME: (%struct.S* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1282 // CHECK3-NEXT: entry: 1283 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1284 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1285 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1286 // CHECK3-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 1287 // CHECK3-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4 1288 // CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float 1289 // CHECK3-NEXT: store float [[CONV]], float* 
[[F]], align 4 1290 // CHECK3-NEXT: ret void 1291 // 1292 // 1293 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev 1294 // CHECK3-SAME: (%struct.S* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1295 // CHECK3-NEXT: entry: 1296 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1297 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1298 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1299 // CHECK3-NEXT: ret void 1300 // 1301 // 1302 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_var_init.1 1303 // CHECK3-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1304 // CHECK3-NEXT: entry: 1305 // CHECK3-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float noundef 1.000000e+00) 1306 // CHECK3-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float noundef 2.000000e+00) 1307 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]] 1308 // CHECK3-NEXT: ret void 1309 // 1310 // 1311 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef 1312 // CHECK3-SAME: (%struct.S* noundef [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1313 // CHECK3-NEXT: entry: 1314 // CHECK3-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1315 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float, align 4 1316 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1317 // CHECK3-NEXT: store float [[A]], float* [[A_ADDR]], align 4 1318 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1319 // CHECK3-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4 1320 // CHECK3-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* noundef [[THIS1]], float noundef [[TMP0]]) 1321 // CHECK3-NEXT: ret void 1322 // 1323 // 1324 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_array_dtor 1325 // CHECK3-SAME: (i8* noundef [[TMP0:%.*]]) #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1326 // CHECK3-NEXT: entry: 1327 // CHECK3-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 1328 // CHECK3-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 1329 // CHECK3-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 1330 // CHECK3: arraydestroy.body: 1331 // CHECK3-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 1332 // CHECK3-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 1333 // CHECK3-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 1334 // CHECK3-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0) 1335 // CHECK3-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]] 1336 // CHECK3: arraydestroy.done1: 1337 // CHECK3-NEXT: ret void 1338 // 1339 // 1340 // CHECK3-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef 1341 // CHECK3-SAME: (%struct.S* noundef [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1342 // CHECK3-NEXT: entry: 1343 // CHECK3-NEXT: 
[[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1344 // CHECK3-NEXT: [[A_ADDR:%.*]] = alloca float, align 4 1345 // CHECK3-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1346 // CHECK3-NEXT: store float [[A]], float* [[A_ADDR]], align 4 1347 // CHECK3-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1348 // CHECK3-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 1349 // CHECK3-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4 1350 // CHECK3-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4 1351 // CHECK3-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to float 1352 // CHECK3-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]] 1353 // CHECK3-NEXT: store float [[ADD]], float* [[F]], align 4 1354 // CHECK3-NEXT: ret void 1355 // 1356 // 1357 // CHECK3-LABEL: define {{[^@]+}}@__cxx_global_var_init.2 1358 // CHECK3-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1359 // CHECK3-NEXT: entry: 1360 // CHECK3-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef @var, float noundef 3.000000e+00) 1361 // CHECK3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]] 1362 // CHECK3-NEXT: ret void 1363 // 1364 // 1365 // CHECK3-LABEL: define {{[^@]+}}@main 1366 // CHECK3-SAME: () #[[ATTR3:[0-9]+]] { 1367 // CHECK3-NEXT: entry: 1368 // CHECK3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 1369 // CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 8 1370 // CHECK3-NEXT: store i32 0, i32* [[RETVAL]], align 4 1371 // CHECK3-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[CLASS_ANON]], %class.anon* [[REF_TMP]], i32 0, i32 0 1372 // CHECK3-NEXT: store i32* @_ZZ4mainE5sivar, i32** [[TMP0]], align 8 1373 // CHECK3-NEXT: call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef [[REF_TMP]]) 1374 // CHECK3-NEXT: ret i32 0 1375 // 1376 // 1377 // CHECK3-LABEL: define {{[^@]+}}@.omp_outlined. 
1378 // CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR5:[0-9]+]] { 1379 // CHECK3-NEXT: entry: 1380 // CHECK3-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1381 // CHECK3-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1382 // CHECK3-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32*, align 8 1383 // CHECK3-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4 1384 // CHECK3-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4 1385 // CHECK3-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4 1386 // CHECK3-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4 1387 // CHECK3-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4 1388 // CHECK3-NEXT: [[G:%.*]] = alloca i32, align 4 1389 // CHECK3-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4 1390 // CHECK3-NEXT: [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8 1391 // CHECK3-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1392 // CHECK3-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1393 // CHECK3-NEXT: store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8 1394 // CHECK3-NEXT: [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8 1395 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4 1396 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4 1397 // CHECK3-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4 1398 // CHECK3-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4 1399 // CHECK3-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4 1400 // CHECK3-NEXT: store i32 [[TMP1]], i32* [[G]], align 4 1401 // CHECK3-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4 1402 // CHECK3-NEXT: store i32 [[TMP2]], i32* [[SIVAR1]], align 4 1403 // CHECK3-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1404 // CHECK3-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4 1405 // CHECK3-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1) 1406 // CHECK3-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4 1407 // CHECK3-NEXT: [[TMP6:%.*]] = icmp slt i32 [[TMP5]], 1 1408 // CHECK3-NEXT: [[TMP7:%.*]] = select i1 [[TMP6]], i32 [[TMP5]], i32 1 1409 // CHECK3-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_SECTIONS_UB_]], align 4 1410 // CHECK3-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4 1411 // CHECK3-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_SECTIONS_IV_]], align 4 1412 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1413 // CHECK3: omp.inner.for.cond: 1414 // CHECK3-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4 1415 // CHECK3-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4 1416 // CHECK3-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] 1417 // CHECK3-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1418 // CHECK3: omp.inner.for.body: 1419 // CHECK3-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4 1420 // CHECK3-NEXT: switch i32 [[TMP11]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [ 1421 // CHECK3-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]] 1422 // CHECK3-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE2:%.*]] 1423 // CHECK3-NEXT: ] 1424 // CHECK3: .omp.sections.case: 1425 // 
CHECK3-NEXT: store i32 1, i32* [[G]], align 4 1426 // CHECK3-NEXT: store i32 10, i32* [[SIVAR1]], align 4 1427 // CHECK3-NEXT: br label [[DOTOMP_SECTIONS_EXIT]] 1428 // CHECK3: .omp.sections.case2: 1429 // CHECK3-NEXT: [[TMP12:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0 1430 // CHECK3-NEXT: store i32* [[G]], i32** [[TMP12]], align 8 1431 // CHECK3-NEXT: [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 1 1432 // CHECK3-NEXT: store i32* [[SIVAR1]], i32** [[TMP13]], align 8 1433 // CHECK3-NEXT: call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* noundef [[REF_TMP]]) 1434 // CHECK3-NEXT: br label [[DOTOMP_SECTIONS_EXIT]] 1435 // CHECK3: .omp.sections.exit: 1436 // CHECK3-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1437 // CHECK3: omp.inner.for.inc: 1438 // CHECK3-NEXT: [[TMP14:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4 1439 // CHECK3-NEXT: [[INC:%.*]] = add nsw i32 [[TMP14]], 1 1440 // CHECK3-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4 1441 // CHECK3-NEXT: br label [[OMP_INNER_FOR_COND]] 1442 // CHECK3: omp.inner.for.end: 1443 // CHECK3-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]]) 1444 // CHECK3-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]]) 1445 // CHECK3-NEXT: ret void 1446 // 1447 // 1448 // CHECK3-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_sections_firstprivate_codegen.cpp 1449 // CHECK3-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1450 // CHECK3-NEXT: entry: 1451 // CHECK3-NEXT: call void @__cxx_global_var_init() 1452 // CHECK3-NEXT: call void @__cxx_global_var_init.1() 1453 // CHECK3-NEXT: call void @__cxx_global_var_init.2() 1454 // CHECK3-NEXT: ret void 1455 // 1456 // 1457 // CHECK4-LABEL: define {{[^@]+}}@__cxx_global_var_init 1458 // CHECK4-SAME: () #[[ATTR0:[0-9]+]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1459 // CHECK4-NEXT: entry: 1460 // CHECK4-NEXT: call void @_ZN1SIfEC1Ev(%struct.S* noundef @test) 1461 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @test to i8*), i8* @__dso_handle) #[[ATTR2:[0-9]+]] 1462 // CHECK4-NEXT: ret void 1463 // 1464 // 1465 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev 1466 // CHECK4-SAME: (%struct.S* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 { 1467 // CHECK4-NEXT: entry: 1468 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1469 // CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1470 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1471 // CHECK4-NEXT: call void @_ZN1SIfEC2Ev(%struct.S* noundef [[THIS1]]) 1472 // CHECK4-NEXT: ret void 1473 // 1474 // 1475 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev 1476 // CHECK4-SAME: (%struct.S* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1477 // CHECK4-NEXT: entry: 1478 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1479 // CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1480 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1481 // CHECK4-NEXT: call void @_ZN1SIfED2Ev(%struct.S* noundef [[THIS1]]) #[[ATTR2]] 1482 // CHECK4-NEXT: ret void 1483 // 1484 // 1485 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev 1486 // CHECK4-SAME: (%struct.S* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] 
align 2 { 1487 // CHECK4-NEXT: entry: 1488 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1489 // CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1490 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1491 // CHECK4-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 1492 // CHECK4-NEXT: [[TMP0:%.*]] = load volatile i32, i32* @g, align 4 1493 // CHECK4-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP0]] to float 1494 // CHECK4-NEXT: store float [[CONV]], float* [[F]], align 4 1495 // CHECK4-NEXT: ret void 1496 // 1497 // 1498 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev 1499 // CHECK4-SAME: (%struct.S* noundef [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1500 // CHECK4-NEXT: entry: 1501 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1502 // CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1503 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1504 // CHECK4-NEXT: ret void 1505 // 1506 // 1507 // CHECK4-LABEL: define {{[^@]+}}@__cxx_global_var_init.1 1508 // CHECK4-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1509 // CHECK4-NEXT: entry: 1510 // CHECK4-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 0), float noundef 1.000000e+00) 1511 // CHECK4-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i64 0, i64 1), float noundef 2.000000e+00) 1512 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* @__cxx_global_array_dtor, i8* null, i8* @__dso_handle) #[[ATTR2]] 1513 // CHECK4-NEXT: ret void 1514 // 1515 // 1516 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ef 1517 // CHECK4-SAME: (%struct.S* noundef [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1518 // CHECK4-NEXT: entry: 1519 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1520 // CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float, align 4 1521 // CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1522 // CHECK4-NEXT: store float [[A]], float* [[A_ADDR]], align 4 1523 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1524 // CHECK4-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4 1525 // CHECK4-NEXT: call void @_ZN1SIfEC2Ef(%struct.S* noundef [[THIS1]], float noundef [[TMP0]]) 1526 // CHECK4-NEXT: ret void 1527 // 1528 // 1529 // CHECK4-LABEL: define {{[^@]+}}@__cxx_global_array_dtor 1530 // CHECK4-SAME: (i8* noundef [[TMP0:%.*]]) #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1531 // CHECK4-NEXT: entry: 1532 // CHECK4-NEXT: [[DOTADDR:%.*]] = alloca i8*, align 8 1533 // CHECK4-NEXT: store i8* [[TMP0]], i8** [[DOTADDR]], align 8 1534 // CHECK4-NEXT: br label [[ARRAYDESTROY_BODY:%.*]] 1535 // CHECK4: arraydestroy.body: 1536 // CHECK4-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ getelementptr inbounds ([[STRUCT_S:%.*]], %struct.S* getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0), i64 2), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ] 1537 // CHECK4-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1 1538 // CHECK4-NEXT: call void @_ZN1SIfED1Ev(%struct.S* noundef [[ARRAYDESTROY_ELEMENT]]) #[[ATTR2]] 1539 // CHECK4-NEXT: 
[[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], getelementptr inbounds ([2 x %struct.S], [2 x %struct.S]* @s_arr, i32 0, i32 0) 1540 // CHECK4-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]] 1541 // CHECK4: arraydestroy.done1: 1542 // CHECK4-NEXT: ret void 1543 // 1544 // 1545 // CHECK4-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ef 1546 // CHECK4-SAME: (%struct.S* noundef [[THIS:%.*]], float noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 { 1547 // CHECK4-NEXT: entry: 1548 // CHECK4-NEXT: [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8 1549 // CHECK4-NEXT: [[A_ADDR:%.*]] = alloca float, align 4 1550 // CHECK4-NEXT: store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8 1551 // CHECK4-NEXT: store float [[A]], float* [[A_ADDR]], align 4 1552 // CHECK4-NEXT: [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8 1553 // CHECK4-NEXT: [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0 1554 // CHECK4-NEXT: [[TMP0:%.*]] = load float, float* [[A_ADDR]], align 4 1555 // CHECK4-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4 1556 // CHECK4-NEXT: [[CONV:%.*]] = sitofp i32 [[TMP1]] to float 1557 // CHECK4-NEXT: [[ADD:%.*]] = fadd float [[TMP0]], [[CONV]] 1558 // CHECK4-NEXT: store float [[ADD]], float* [[F]], align 4 1559 // CHECK4-NEXT: ret void 1560 // 1561 // 1562 // CHECK4-LABEL: define {{[^@]+}}@__cxx_global_var_init.2 1563 // CHECK4-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1564 // CHECK4-NEXT: entry: 1565 // CHECK4-NEXT: call void @_ZN1SIfEC1Ef(%struct.S* noundef @var, float noundef 3.000000e+00) 1566 // CHECK4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(void (i8*)* bitcast (void (%struct.S*)* @_ZN1SIfED1Ev to void (i8*)*), i8* bitcast (%struct.S* @var to i8*), i8* @__dso_handle) #[[ATTR2]] 1567 // CHECK4-NEXT: ret void 1568 // 1569 // 1570 // CHECK4-LABEL: define {{[^@]+}}@main 1571 // CHECK4-SAME: () #[[ATTR3:[0-9]+]] { 1572 // CHECK4-NEXT: entry: 1573 // CHECK4-NEXT: [[RETVAL:%.*]] = alloca i32, align 4 1574 // CHECK4-NEXT: [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, align 8 1575 // CHECK4-NEXT: store i32 0, i32* [[RETVAL]], align 4 1576 // CHECK4-NEXT: [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 0 1577 // CHECK4-NEXT: store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA]], align 8 1578 // CHECK4-NEXT: [[BLOCK_FLAGS:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 1 1579 // CHECK4-NEXT: store i32 1073741824, i32* [[BLOCK_FLAGS]], align 8 1580 // CHECK4-NEXT: [[BLOCK_RESERVED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 2 1581 // CHECK4-NEXT: store i32 0, i32* [[BLOCK_RESERVED]], align 4 1582 // CHECK4-NEXT: [[BLOCK_INVOKE:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 3 1583 // CHECK4-NEXT: store i8* bitcast (void (i8*)* @__main_block_invoke to i8*), i8** [[BLOCK_INVOKE]], align 8 1584 // CHECK4-NEXT: [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, 
i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 4 1585 // CHECK4-NEXT: store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp.3 to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8 1586 // CHECK4-NEXT: [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 5 1587 // CHECK4-NEXT: [[TMP0:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4 1588 // CHECK4-NEXT: store i32 [[TMP0]], i32* [[BLOCK_CAPTURED]], align 8 1589 // CHECK4-NEXT: [[TMP1:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]] to void ()* 1590 // CHECK4-NEXT: [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP1]] to %struct.__block_literal_generic* 1591 // CHECK4-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3 1592 // CHECK4-NEXT: [[TMP3:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8* 1593 // CHECK4-NEXT: [[TMP4:%.*]] = load i8*, i8** [[TMP2]], align 8 1594 // CHECK4-NEXT: [[TMP5:%.*]] = bitcast i8* [[TMP4]] to void (i8*)* 1595 // CHECK4-NEXT: call void [[TMP5]](i8* noundef [[TMP3]]) 1596 // CHECK4-NEXT: ret i32 0 1597 // 1598 // 1599 // CHECK4-LABEL: define {{[^@]+}}@__main_block_invoke 1600 // CHECK4-SAME: (i8* noundef [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR1]] { 1601 // CHECK4-NEXT: entry: 1602 // CHECK4-NEXT: [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8 1603 // CHECK4-NEXT: [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>*, align 8 1604 // CHECK4-NEXT: store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8 1605 // CHECK4-NEXT: [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* 1606 // CHECK4-NEXT: store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>** [[BLOCK_ADDR]], align 8 1607 // CHECK4-NEXT: call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* @_ZZ4mainE5sivar) 1608 // CHECK4-NEXT: ret void 1609 // 1610 // 1611 // CHECK4-LABEL: define {{[^@]+}}@.omp_outlined. 
1612 // CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[SIVAR:%.*]]) #[[ATTR4:[0-9]+]] { 1613 // CHECK4-NEXT: entry: 1614 // CHECK4-NEXT: [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8 1615 // CHECK4-NEXT: [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8 1616 // CHECK4-NEXT: [[SIVAR_ADDR:%.*]] = alloca i32*, align 8 1617 // CHECK4-NEXT: [[DOTOMP_SECTIONS_LB_:%.*]] = alloca i32, align 4 1618 // CHECK4-NEXT: [[DOTOMP_SECTIONS_UB_:%.*]] = alloca i32, align 4 1619 // CHECK4-NEXT: [[DOTOMP_SECTIONS_ST_:%.*]] = alloca i32, align 4 1620 // CHECK4-NEXT: [[DOTOMP_SECTIONS_IL_:%.*]] = alloca i32, align 4 1621 // CHECK4-NEXT: [[DOTOMP_SECTIONS_IV_:%.*]] = alloca i32, align 4 1622 // CHECK4-NEXT: [[G:%.*]] = alloca i32, align 4 1623 // CHECK4-NEXT: [[SIVAR1:%.*]] = alloca i32, align 4 1624 // CHECK4-NEXT: [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>, align 8 1625 // CHECK4-NEXT: store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8 1626 // CHECK4-NEXT: store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8 1627 // CHECK4-NEXT: store i32* [[SIVAR]], i32** [[SIVAR_ADDR]], align 8 1628 // CHECK4-NEXT: [[TMP0:%.*]] = load i32*, i32** [[SIVAR_ADDR]], align 8 1629 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_LB_]], align 4 1630 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_UB_]], align 4 1631 // CHECK4-NEXT: store i32 1, i32* [[DOTOMP_SECTIONS_ST_]], align 4 1632 // CHECK4-NEXT: store i32 0, i32* [[DOTOMP_SECTIONS_IL_]], align 4 1633 // CHECK4-NEXT: [[TMP1:%.*]] = load volatile i32, i32* @g, align 4 1634 // CHECK4-NEXT: store i32 [[TMP1]], i32* [[G]], align 4 1635 // CHECK4-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP0]], align 4 1636 // CHECK4-NEXT: store i32 [[TMP2]], i32* [[SIVAR1]], align 4 1637 // CHECK4-NEXT: [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8 1638 // CHECK4-NEXT: [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4 1639 // CHECK4-NEXT: call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP4]], i32 34, i32* [[DOTOMP_SECTIONS_IL_]], i32* [[DOTOMP_SECTIONS_LB_]], i32* [[DOTOMP_SECTIONS_UB_]], i32* [[DOTOMP_SECTIONS_ST_]], i32 1, i32 1) 1640 // CHECK4-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4 1641 // CHECK4-NEXT: [[TMP6:%.*]] = icmp slt i32 [[TMP5]], 1 1642 // CHECK4-NEXT: [[TMP7:%.*]] = select i1 [[TMP6]], i32 [[TMP5]], i32 1 1643 // CHECK4-NEXT: store i32 [[TMP7]], i32* [[DOTOMP_SECTIONS_UB_]], align 4 1644 // CHECK4-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_LB_]], align 4 1645 // CHECK4-NEXT: store i32 [[TMP8]], i32* [[DOTOMP_SECTIONS_IV_]], align 4 1646 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND:%.*]] 1647 // CHECK4: omp.inner.for.cond: 1648 // CHECK4-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4 1649 // CHECK4-NEXT: [[TMP10:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_UB_]], align 4 1650 // CHECK4-NEXT: [[CMP:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]] 1651 // CHECK4-NEXT: br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]] 1652 // CHECK4: omp.inner.for.body: 1653 // CHECK4-NEXT: [[TMP11:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4 1654 // CHECK4-NEXT: switch i32 [[TMP11]], label [[DOTOMP_SECTIONS_EXIT:%.*]] [ 1655 // CHECK4-NEXT: i32 0, label [[DOTOMP_SECTIONS_CASE:%.*]] 1656 // CHECK4-NEXT: i32 1, label [[DOTOMP_SECTIONS_CASE2:%.*]] 1657 // CHECK4-NEXT: ] 1658 // 
CHECK4: .omp.sections.case: 1659 // CHECK4-NEXT: store i32 1, i32* [[G]], align 4 1660 // CHECK4-NEXT: store i32 10, i32* [[SIVAR1]], align 4 1661 // CHECK4-NEXT: br label [[DOTOMP_SECTIONS_EXIT]] 1662 // CHECK4: .omp.sections.case2: 1663 // CHECK4-NEXT: [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* [[BLOCK]], i32 0, i32 0 1664 // CHECK4-NEXT: store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA]], align 8 1665 // CHECK4-NEXT: [[BLOCK_FLAGS:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* [[BLOCK]], i32 0, i32 1 1666 // CHECK4-NEXT: store i32 1073741824, i32* [[BLOCK_FLAGS]], align 8 1667 // CHECK4-NEXT: [[BLOCK_RESERVED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* [[BLOCK]], i32 0, i32 2 1668 // CHECK4-NEXT: store i32 0, i32* [[BLOCK_RESERVED]], align 4 1669 // CHECK4-NEXT: [[BLOCK_INVOKE:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* [[BLOCK]], i32 0, i32 3 1670 // CHECK4-NEXT: store i8* bitcast (void (i8*)* @var_block_invoke to i8*), i8** [[BLOCK_INVOKE]], align 8 1671 // CHECK4-NEXT: [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* [[BLOCK]], i32 0, i32 4 1672 // CHECK4-NEXT: store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8 1673 // CHECK4-NEXT: [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* [[BLOCK]], i32 0, i32 5 1674 // CHECK4-NEXT: [[TMP12:%.*]] = load volatile i32, i32* [[G]], align 4 1675 // CHECK4-NEXT: store volatile i32 [[TMP12]], i32* [[BLOCK_CAPTURED]], align 8 1676 // CHECK4-NEXT: [[BLOCK_CAPTURED3:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* [[BLOCK]], i32 0, i32 6 1677 // CHECK4-NEXT: [[TMP13:%.*]] = load i32, i32* [[SIVAR1]], align 4 1678 // CHECK4-NEXT: store i32 [[TMP13]], i32* [[BLOCK_CAPTURED3]], align 4 1679 // CHECK4-NEXT: [[TMP14:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* [[BLOCK]] to void ()* 1680 // CHECK4-NEXT: [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP14]] to %struct.__block_literal_generic* 1681 // CHECK4-NEXT: [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3 1682 // CHECK4-NEXT: [[TMP16:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8* 1683 // CHECK4-NEXT: [[TMP17:%.*]] = load i8*, i8** [[TMP15]], align 8 1684 // CHECK4-NEXT: [[TMP18:%.*]] = bitcast i8* [[TMP17]] to void (i8*)* 1685 // CHECK4-NEXT: call void [[TMP18]](i8* noundef [[TMP16]]) 1686 // CHECK4-NEXT: br label [[DOTOMP_SECTIONS_EXIT]] 1687 // CHECK4: .omp.sections.exit: 1688 // CHECK4-NEXT: br label [[OMP_INNER_FOR_INC:%.*]] 1689 // CHECK4: omp.inner.for.inc: 
1690 // CHECK4-NEXT: [[TMP19:%.*]] = load i32, i32* [[DOTOMP_SECTIONS_IV_]], align 4 1691 // CHECK4-NEXT: [[INC:%.*]] = add nsw i32 [[TMP19]], 1 1692 // CHECK4-NEXT: store i32 [[INC]], i32* [[DOTOMP_SECTIONS_IV_]], align 4 1693 // CHECK4-NEXT: br label [[OMP_INNER_FOR_COND]] 1694 // CHECK4: omp.inner.for.end: 1695 // CHECK4-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]]) 1696 // CHECK4-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP4]]) 1697 // CHECK4-NEXT: ret void 1698 // 1699 // 1700 // CHECK4-LABEL: define {{[^@]+}}@var_block_invoke 1701 // CHECK4-SAME: (i8* noundef [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR1]] { 1702 // CHECK4-NEXT: entry: 1703 // CHECK4-NEXT: [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8 1704 // CHECK4-NEXT: [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>*, align 8 1705 // CHECK4-NEXT: store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8 1706 // CHECK4-NEXT: [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* 1707 // CHECK4-NEXT: store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>** [[BLOCK_ADDR]], align 8 1708 // CHECK4-NEXT: [[BLOCK_CAPTURE_ADDR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* [[BLOCK]], i32 0, i32 5 1709 // CHECK4-NEXT: store i32 2, i32* [[BLOCK_CAPTURE_ADDR]], align 8 1710 // CHECK4-NEXT: [[BLOCK_CAPTURE_ADDR1:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32, i32 }>* [[BLOCK]], i32 0, i32 6 1711 // CHECK4-NEXT: store i32 20, i32* [[BLOCK_CAPTURE_ADDR1]], align 4 1712 // CHECK4-NEXT: ret void 1713 // 1714 // 1715 // CHECK4-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_sections_firstprivate_codegen.cpp 1716 // CHECK4-SAME: () #[[ATTR0]] section "__TEXT,__StaticInit,regular,pure_instructions" { 1717 // CHECK4-NEXT: entry: 1718 // CHECK4-NEXT: call void @__cxx_global_var_init() 1719 // CHECK4-NEXT: call void @__cxx_global_var_init.1() 1720 // CHECK4-NEXT: call void @__cxx_global_var_init.2() 1721 // CHECK4-NEXT: ret void 1722 // 1723
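// Reader's note (hand-written summary of the autogenerated CHECK lines above, kept out of the generated blocks):
//  - Each outlined sections region makes its firstprivate copies on entry: scalars
//    (t_var, g, sivar) are loaded and stored into local allocas, the POD array vec
//    is copied with llvm.memcpy, and the S<T> array and var are copied element by
//    element through S(const S&, St), with a temporary St constructed and destroyed
//    around every copy.
//  - The sections body is lowered as __kmpc_for_static_init_4(..., 34, ...), a switch
//    over the section index, __kmpc_for_static_fini, destructors for the private
//    copies in reverse order, and a trailing __kmpc_barrier; the nowait sections
//    region emitted directly in main has no trailing barrier.
//  - In the LAMBDA and BLOCKS runs, the second section hands the private g and sivar
//    copies to the inner lambda (by address) or to a stack block literal (by value)
//    and calls its invoke function; the block's invoke (@var_block_invoke) stores 2
//    and 20 into the captured copies.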