xref: /llvm-project/clang/test/OpenMP/parallel_master_taskloop_firstprivate_codegen.cpp (revision 782c59a4eef0bca8546b0cfbb1e48a9fcd044c93)
1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
3 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
4 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
5 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
6 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
7 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -std=c++11 -DARRAY -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=ARRAY %s
8 
9 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
10 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
11 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY1 %s
12 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY2 %s
13 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY3 %s
14 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -std=c++11 -DARRAY -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY4 %s
15 // expected-no-diagnostics
16 
17 #ifndef ARRAY
18 #ifndef HEADER
19 #define HEADER
20 
21 template <class T>
// Test helper: a class with a non-trivial default constructor, a
// copy-style constructor taking an extra defaulted T, a conversion
// operator, and a non-trivial destructor.  Because none of these are
// trivial, every firstprivate copy of an S object in the checked IR
// must go through explicit constructor/destructor calls rather than a
// plain memcpy (see the _ZN1SId/iEC1* and _ZN1SId/iED1Ev CHECK lines).
22 struct S {
23   T f;
24   S(T a) : f(a) {}
25   S() : f() {}
26   S(const S &s, T t = T()) : f(s.f + t) {}
27   operator T() { return T(); }
28   ~S() {}
29 };
30 
31 volatile double g;
32 
33 template <typename T>
// Template variant of the firstprivate codegen test (instantiated with
// T = int from main).  Exercises firstprivate of:
//  - a scalar with an over-aligned attribute (t_var, aligned(128)),
//  - a POD array (vec),
//  - an array of class type with non-trivial ctors/dtor (s_arr),
//  - a single class-type object (var).
// NOTE: s_arr and var appear twice in the clause on purpose -- the
// test verifies that duplicate firstprivate list items are collapsed
// into a single private copy.
34 T tmain() {
35   S<T> ttt;
36   S<T> test(ttt);
37   T t_var __attribute__((aligned(128))) = T();
38   T vec[] = {1, 2};
39   S<T> s_arr[] = {1, 2};
40   S<T> var(3);
41 #pragma omp parallel master taskloop firstprivate(t_var, vec, s_arr, s_arr, var, var)
42   for (int i = 0; i < 10; ++i) {
43     vec[0] = t_var;
44     s_arr[0] = var;
45   }
46   return T();
47 }
48 
// Driver for three mutually exclusive compilation modes selected by -D
// on the RUN lines:
//  - LAMBDA: firstprivate of g/sivar captured inside a C++ lambda,
//  - BLOCKS: same, but inside an Objective-C block (-fblocks),
//  - default: the main CHECK-prefixed codegen test, plus the
//    tmain<int>() instantiation.
49 int main() {
50   static int sivar;
51 #ifdef LAMBDA
// Lambda variant: the outer lambda runs the taskloop; the inner lambda
// writes to the (firstprivate) copies of g and sivar.
52   [&]() {
53 
54 
55 #pragma omp parallel master taskloop firstprivate(g, sivar)
56   for (int i = 0; i < 10; ++i) {
57 
58     g = 1;
59     sivar = 11;
60     [&]() {
61       g = 2;
62       sivar = 22;
63     }();
64   }
65   }();
66   return 0;
67 #elif defined(BLOCKS)
// Blocks variant: same shape as the lambda test, using ^{} blocks.
68   ^{
69 
70 #pragma omp parallel master taskloop firstprivate(g, sivar)
71   for (int i = 0; i < 10; ++i) {
72 
73     g = 1;
74     sivar = 11;
75     ^{
76       g = 2;
77       sivar = 22;
78     }();
79   }
80   }();
81   return 0;
82 #else
// Plain variant: firstprivate of a class object (var), scalar (t_var),
// class array (s_arr), POD array (vec) and a static local (sivar).
// s_arr and var are deliberately listed twice to check de-duplication
// of repeated firstprivate items.
83   S<double> ttt;
84   S<double> test(ttt);
85   int t_var = 0;
86   int vec[] = {1, 2};
87   S<double> s_arr[] = {1, 2};
88   S<double> var(3);
89 #pragma omp parallel master taskloop firstprivate(var, t_var, s_arr, vec, s_arr, var, sivar)
90   for (int i = 0; i < 10; ++i) {
91     vec[0] = t_var;
92     s_arr[0] = var;
93     sivar = 33;
94   }
95   return tmain<int>();
96 #endif
97 }
98 
99 
100 
101 // Store original variables in capture struct.
102 
103 // Allocate task.
104 // Returns struct kmp_task_t {
105 //         [[KMP_TASK_T]] task_data;
106 //         [[KMP_TASK_MAIN_TY]] privates;
107 //       };
108 
109 // Fill kmp_task_t->shareds by copying from original capture argument.
110 
111 // Initialize kmp_task_t->privates with default values (no init for simple types, default constructors for classes).
112 // Also copy address of private copy to the corresponding shareds reference.
113 
114 // Constructors for s_arr and var.
115 // s_arr;
116 
117 // var;
118 
119 // t_var;
120 
121 // vec;
122 
123 // sivar;
124 
125 // Provide pointer to destructor function, which will destroy private variables at the end of the task.
126 
127 // Start task.
128 
129 
130 
131 
132 
133 
134 // Privates actually are used.
135 
136 
137 
138 
139 
140 
141 
142 
143 // Store original variables in capture struct.
144 
145 // Allocate task.
146 // Returns struct kmp_task_t {
147 //         [[KMP_TASK_T_TY]] task_data;
148 //         [[KMP_TASK_TMAIN_TY]] privates;
149 //       };
150 
151 // Fill kmp_task_t->shareds by copying from original capture argument.
152 
153 // Initialize kmp_task_t->privates with default values (no init for simple types, default constructors for classes).
154 
155 // t_var;
156 
157 // vec;
158 
159 // Constructors for s_arr and var.
160 // s_arr;
161 
162 // var;
163 
164 // Provide pointer to destructor function, which will destroy private variables at the end of the task.
165 
166 // Start task.
167 
168 
169 
170 // Privates actually are used.
171 
172 
173 
174 
175 
176 
177 #endif
178 #else
// Helper for the ARRAY variant: a class with user-provided default
// constructor, copy constructor and destructor so that firstprivate
// of an St array parameter needs real ctor/dtor calls.
// NOTE(review): the copy constructor intentionally leaves a/b
// uninitialized -- only the presence of the calls matters to the
// checked IR, not the member values.
179 struct St {
180   int a, b;
181   St() : a(0), b(0) {}
182   St(const St &) {}
183   ~St() {}
184 };
185 
// ARRAY variant: firstprivate of array-typed parameters.  Both `a`
// (VLA-declared) and `s` decay to pointers, so the firstprivate copies
// here are pointer copies, not element-wise copies.
186 void array_func(int n, float a[n], St s[2]) {
187 #pragma omp parallel master taskloop firstprivate(a, s)
188   for (int i = 0; i < 10; ++i)
189     ;
190 }
191 #endif
192 
193 // CHECK-LABEL: define {{[^@]+}}@main
194 // CHECK-SAME: () #[[ATTR0:[0-9]+]] {
195 // CHECK-NEXT:  entry:
196 // CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
197 // CHECK-NEXT:    [[TTT:%.*]] = alloca [[STRUCT_S:%.*]], align 8
198 // CHECK-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S]], align 8
199 // CHECK-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
200 // CHECK-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
201 // CHECK-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S], align 16
202 // CHECK-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S]], align 8
203 // CHECK-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i64, align 8
204 // CHECK-NEXT:    store i32 0, i32* [[RETVAL]], align 4
205 // CHECK-NEXT:    call void @_ZN1SIdEC1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TTT]])
206 // CHECK-NEXT:    call void @_ZN1SIdEC1ERKS0_d(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TEST]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[TTT]], double noundef 0.000000e+00)
207 // CHECK-NEXT:    store i32 0, i32* [[T_VAR]], align 4
208 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
209 // CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i64 8, i1 false)
210 // CHECK-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
211 // CHECK-NEXT:    call void @_ZN1SIdEC1Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[ARRAYINIT_BEGIN]], double noundef 1.000000e+00)
212 // CHECK-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
213 // CHECK-NEXT:    call void @_ZN1SIdEC1Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[ARRAYINIT_ELEMENT]], double noundef 2.000000e+00)
214 // CHECK-NEXT:    call void @_ZN1SIdEC1Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[VAR]], double noundef 3.000000e+00)
215 // CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[T_VAR]], align 4
216 // CHECK-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_CASTED]] to i32*
217 // CHECK-NEXT:    store i32 [[TMP1]], i32* [[CONV]], align 4
218 // CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* [[T_VAR_CASTED]], align 8
219 // CHECK-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i64, [2 x %struct.S]*, %struct.S*)* @main.omp_outlined to void (i32*, i32*, ...)*), [2 x i32]* [[VEC]], i64 [[TMP2]], [2 x %struct.S]* [[S_ARR]], %struct.S* [[VAR]])
220 // CHECK-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
221 // CHECK-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
222 // CHECK-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[VAR]]) #[[ATTR4:[0-9]+]]
223 // CHECK-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
224 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
225 // CHECK-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
226 // CHECK:       arraydestroy.body:
227 // CHECK-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP3]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
228 // CHECK-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
229 // CHECK-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
230 // CHECK-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
231 // CHECK-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
232 // CHECK:       arraydestroy.done1:
233 // CHECK-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TEST]]) #[[ATTR4]]
234 // CHECK-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TTT]]) #[[ATTR4]]
235 // CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[RETVAL]], align 4
236 // CHECK-NEXT:    ret i32 [[TMP4]]
237 //
238 //
239 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIdEC1Ev
240 // CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
241 // CHECK-NEXT:  entry:
242 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
243 // CHECK-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
244 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
245 // CHECK-NEXT:    call void @_ZN1SIdEC2Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]])
246 // CHECK-NEXT:    ret void
247 //
248 //
249 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIdEC1ERKS0_d
250 // CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[S:%.*]], double noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
251 // CHECK-NEXT:  entry:
252 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
253 // CHECK-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S*, align 8
254 // CHECK-NEXT:    [[T_ADDR:%.*]] = alloca double, align 8
255 // CHECK-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
256 // CHECK-NEXT:    store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8
257 // CHECK-NEXT:    store double [[T]], double* [[T_ADDR]], align 8
258 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
259 // CHECK-NEXT:    [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8
260 // CHECK-NEXT:    [[TMP1:%.*]] = load double, double* [[T_ADDR]], align 8
261 // CHECK-NEXT:    call void @_ZN1SIdEC2ERKS0_d(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[TMP0]], double noundef [[TMP1]])
262 // CHECK-NEXT:    ret void
263 //
264 //
265 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIdEC1Ed
266 // CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], double noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
267 // CHECK-NEXT:  entry:
268 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
269 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
270 // CHECK-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
271 // CHECK-NEXT:    store double [[A]], double* [[A_ADDR]], align 8
272 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
273 // CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[A_ADDR]], align 8
274 // CHECK-NEXT:    call void @_ZN1SIdEC2Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]], double noundef [[TMP0]])
275 // CHECK-NEXT:    ret void
276 //
277 //
278 // CHECK-LABEL: define {{[^@]+}}@main.omp_outlined
279 // CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [2 x i32]* noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]], i64 noundef [[T_VAR:%.*]], [2 x %struct.S]* noundef nonnull align 8 dereferenceable(16) [[S_ARR:%.*]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[VAR:%.*]]) #[[ATTR3:[0-9]+]] {
280 // CHECK-NEXT:  entry:
281 // CHECK-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
282 // CHECK-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
283 // CHECK-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
284 // CHECK-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i64, align 8
285 // CHECK-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 8
286 // CHECK-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S*, align 8
287 // CHECK-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
288 // CHECK-NEXT:    [[TMP:%.*]] = alloca i32, align 4
289 // CHECK-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
290 // CHECK-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
291 // CHECK-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
292 // CHECK-NEXT:    store i64 [[T_VAR]], i64* [[T_VAR_ADDR]], align 8
293 // CHECK-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 8
294 // CHECK-NEXT:    store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 8
295 // CHECK-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
296 // CHECK-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_ADDR]] to i32*
297 // CHECK-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 8
298 // CHECK-NEXT:    [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 8
299 // CHECK-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
300 // CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
301 // CHECK-NEXT:    [[TMP5:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
302 // CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
303 // CHECK-NEXT:    br i1 [[TMP6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
304 // CHECK:       omp_if.then:
305 // CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
306 // CHECK-NEXT:    store [2 x %struct.S]* [[TMP1]], [2 x %struct.S]** [[TMP7]], align 8
307 // CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
308 // CHECK-NEXT:    store %struct.S* [[TMP2]], %struct.S** [[TMP8]], align 8
309 // CHECK-NEXT:    call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
310 // CHECK-NEXT:    [[TMP9:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i32 9, i64 120, i64 16, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
311 // CHECK-NEXT:    [[TMP10:%.*]] = bitcast i8* [[TMP9]] to %struct.kmp_task_t_with_privates*
312 // CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP10]], i32 0, i32 0
313 // CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 0
314 // CHECK-NEXT:    [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
315 // CHECK-NEXT:    [[TMP14:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
316 // CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP13]], i8* align 8 [[TMP14]], i64 16, i1 false)
317 // CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP10]], i32 0, i32 1
318 // CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP15]], i32 0, i32 0
319 // CHECK-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP16]], i32 0, i32 0
320 // CHECK-NEXT:    [[TMP17:%.*]] = bitcast [2 x %struct.S]* [[TMP1]] to %struct.S*
321 // CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 2
322 // CHECK-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN]], [[TMP18]]
323 // CHECK-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE1:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
324 // CHECK:       omp.arraycpy.body:
325 // CHECK-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP17]], [[OMP_IF_THEN]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
326 // CHECK-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[OMP_IF_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
327 // CHECK-NEXT:    call void @_ZN1SIdEC1ERKS0_d(%struct.S* noundef nonnull align 8 dereferenceable(8) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[OMP_ARRAYCPY_SRCELEMENTPAST]], double noundef 0.000000e+00)
328 // CHECK-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
329 // CHECK-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
330 // CHECK-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP18]]
331 // CHECK-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE1]], label [[OMP_ARRAYCPY_BODY]]
332 // CHECK:       omp.arraycpy.done1:
333 // CHECK-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP15]], i32 0, i32 1
334 // CHECK-NEXT:    call void @_ZN1SIdEC1ERKS0_d(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TMP19]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[TMP2]], double noundef 0.000000e+00)
335 // CHECK-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP15]], i32 0, i32 2
336 // CHECK-NEXT:    [[TMP21:%.*]] = load i32, i32* [[CONV]], align 4
337 // CHECK-NEXT:    store i32 [[TMP21]], i32* [[TMP20]], align 8
338 // CHECK-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP15]], i32 0, i32 3
339 // CHECK-NEXT:    [[TMP23:%.*]] = bitcast [2 x i32]* [[TMP22]] to i8*
340 // CHECK-NEXT:    [[TMP24:%.*]] = bitcast [2 x i32]* [[TMP0]] to i8*
341 // CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP23]], i8* align 4 [[TMP24]], i64 8, i1 false)
342 // CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP15]], i32 0, i32 4
343 // CHECK-NEXT:    [[TMP26:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
344 // CHECK-NEXT:    store i32 [[TMP26]], i32* [[TMP25]], align 4
345 // CHECK-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 3
346 // CHECK-NEXT:    [[TMP28:%.*]] = bitcast %union.kmp_cmplrdata_t* [[TMP27]] to i32 (i32, i8*)**
347 // CHECK-NEXT:    store i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_destructor. to i32 (i32, i8*)*), i32 (i32, i8*)** [[TMP28]], align 8
348 // CHECK-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 5
349 // CHECK-NEXT:    store i64 0, i64* [[TMP29]], align 8
350 // CHECK-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 6
351 // CHECK-NEXT:    store i64 9, i64* [[TMP30]], align 8
352 // CHECK-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 7
353 // CHECK-NEXT:    store i64 1, i64* [[TMP31]], align 8
354 // CHECK-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 9
355 // CHECK-NEXT:    [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i8*
356 // CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP33]], i8 0, i64 8, i1 false)
357 // CHECK-NEXT:    [[TMP34:%.*]] = load i64, i64* [[TMP31]], align 8
358 // CHECK-NEXT:    call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i8* [[TMP9]], i32 1, i64* [[TMP29]], i64* [[TMP30]], i64 [[TMP34]], i32 1, i32 0, i64 0, i8* bitcast (void (%struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates*, i32)* @.omp_task_dup. to i8*))
359 // CHECK-NEXT:    call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
360 // CHECK-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
361 // CHECK-NEXT:    br label [[OMP_IF_END]]
362 // CHECK:       omp_if.end:
363 // CHECK-NEXT:    ret void
364 //
365 //
366 // CHECK-LABEL: define {{[^@]+}}@.omp_task_privates_map.
367 // CHECK-SAME: (%struct..kmp_privates.t* noalias noundef [[TMP0:%.*]], %struct.S** noalias noundef [[TMP1:%.*]], i32** noalias noundef [[TMP2:%.*]], [2 x %struct.S]** noalias noundef [[TMP3:%.*]], [2 x i32]** noalias noundef [[TMP4:%.*]], i32** noalias noundef [[TMP5:%.*]]) #[[ATTR6:[0-9]+]] {
368 // CHECK-NEXT:  entry:
369 // CHECK-NEXT:    [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8
370 // CHECK-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.S**, align 8
371 // CHECK-NEXT:    [[DOTADDR2:%.*]] = alloca i32**, align 8
372 // CHECK-NEXT:    [[DOTADDR3:%.*]] = alloca [2 x %struct.S]**, align 8
373 // CHECK-NEXT:    [[DOTADDR4:%.*]] = alloca [2 x i32]**, align 8
374 // CHECK-NEXT:    [[DOTADDR5:%.*]] = alloca i32**, align 8
375 // CHECK-NEXT:    store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8
376 // CHECK-NEXT:    store %struct.S** [[TMP1]], %struct.S*** [[DOTADDR1]], align 8
377 // CHECK-NEXT:    store i32** [[TMP2]], i32*** [[DOTADDR2]], align 8
378 // CHECK-NEXT:    store [2 x %struct.S]** [[TMP3]], [2 x %struct.S]*** [[DOTADDR3]], align 8
379 // CHECK-NEXT:    store [2 x i32]** [[TMP4]], [2 x i32]*** [[DOTADDR4]], align 8
380 // CHECK-NEXT:    store i32** [[TMP5]], i32*** [[DOTADDR5]], align 8
381 // CHECK-NEXT:    [[TMP6:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8
382 // CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP6]], i32 0, i32 0
383 // CHECK-NEXT:    [[TMP8:%.*]] = load [2 x %struct.S]**, [2 x %struct.S]*** [[DOTADDR3]], align 8
384 // CHECK-NEXT:    store [2 x %struct.S]* [[TMP7]], [2 x %struct.S]** [[TMP8]], align 8
385 // CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP6]], i32 0, i32 1
386 // CHECK-NEXT:    [[TMP10:%.*]] = load %struct.S**, %struct.S*** [[DOTADDR1]], align 8
387 // CHECK-NEXT:    store %struct.S* [[TMP9]], %struct.S** [[TMP10]], align 8
388 // CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP6]], i32 0, i32 2
389 // CHECK-NEXT:    [[TMP12:%.*]] = load i32**, i32*** [[DOTADDR2]], align 8
390 // CHECK-NEXT:    store i32* [[TMP11]], i32** [[TMP12]], align 8
391 // CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP6]], i32 0, i32 3
392 // CHECK-NEXT:    [[TMP14:%.*]] = load [2 x i32]**, [2 x i32]*** [[DOTADDR4]], align 8
393 // CHECK-NEXT:    store [2 x i32]* [[TMP13]], [2 x i32]** [[TMP14]], align 8
394 // CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP6]], i32 0, i32 4
395 // CHECK-NEXT:    [[TMP16:%.*]] = load i32**, i32*** [[DOTADDR5]], align 8
396 // CHECK-NEXT:    store i32* [[TMP15]], i32** [[TMP16]], align 8
397 // CHECK-NEXT:    ret void
398 //
399 //
400 // CHECK-LABEL: define {{[^@]+}}@.omp_task_entry.
401 // CHECK-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR7:[0-9]+]] {
402 // CHECK-NEXT:  entry:
403 // CHECK-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
404 // CHECK-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
405 // CHECK-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
406 // CHECK-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
407 // CHECK-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
408 // CHECK-NEXT:    [[DOTLB__ADDR_I:%.*]] = alloca i64, align 8
409 // CHECK-NEXT:    [[DOTUB__ADDR_I:%.*]] = alloca i64, align 8
410 // CHECK-NEXT:    [[DOTST__ADDR_I:%.*]] = alloca i64, align 8
411 // CHECK-NEXT:    [[DOTLITER__ADDR_I:%.*]] = alloca i32, align 4
412 // CHECK-NEXT:    [[DOTREDUCTIONS__ADDR_I:%.*]] = alloca i8*, align 8
413 // CHECK-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
414 // CHECK-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca %struct.S*, align 8
415 // CHECK-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca i32*, align 8
416 // CHECK-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR2_I:%.*]] = alloca [2 x %struct.S]*, align 8
417 // CHECK-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR3_I:%.*]] = alloca [2 x i32]*, align 8
418 // CHECK-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR4_I:%.*]] = alloca i32*, align 8
419 // CHECK-NEXT:    [[I_I:%.*]] = alloca i32, align 4
420 // CHECK-NEXT:    [[DOTOMP_IV_I:%.*]] = alloca i32, align 4
421 // CHECK-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
422 // CHECK-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
423 // CHECK-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
424 // CHECK-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
425 // CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
426 // CHECK-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
427 // CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
428 // CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
429 // CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
430 // CHECK-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
431 // CHECK-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
432 // CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
433 // CHECK-NEXT:    [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
434 // CHECK-NEXT:    [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
435 // CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 5
436 // CHECK-NEXT:    [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8
437 // CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 6
438 // CHECK-NEXT:    [[TMP15:%.*]] = load i64, i64* [[TMP14]], align 8
439 // CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 7
440 // CHECK-NEXT:    [[TMP17:%.*]] = load i64, i64* [[TMP16]], align 8
441 // CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 8
442 // CHECK-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 8
443 // CHECK-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 9
444 // CHECK-NEXT:    [[TMP21:%.*]] = load i8*, i8** [[TMP20]], align 8
445 // CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
446 // CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
447 // CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]])
448 // CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]])
449 // CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]])
450 // CHECK-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14
451 // CHECK-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !14
452 // CHECK-NEXT:    store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
453 // CHECK-NEXT:    store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, %struct.S**, i32**, [2 x %struct.S]**, [2 x i32]**, i32**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
454 // CHECK-NEXT:    store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !14
455 // CHECK-NEXT:    store i64 [[TMP13]], i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
456 // CHECK-NEXT:    store i64 [[TMP15]], i64* [[DOTUB__ADDR_I]], align 8, !noalias !14
457 // CHECK-NEXT:    store i64 [[TMP17]], i64* [[DOTST__ADDR_I]], align 8, !noalias !14
458 // CHECK-NEXT:    store i32 [[TMP19]], i32* [[DOTLITER__ADDR_I]], align 4, !noalias !14
459 // CHECK-NEXT:    store i8* [[TMP21]], i8** [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14
460 // CHECK-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
461 // CHECK-NEXT:    [[TMP22:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
462 // CHECK-NEXT:    [[TMP23:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
463 // CHECK-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
464 // CHECK-NEXT:    [[TMP25:%.*]] = bitcast void (i8*, ...)* [[TMP23]] to void (i8*, %struct.S**, i32**, [2 x %struct.S]**, [2 x i32]**, i32**)*
465 // CHECK-NEXT:    call void [[TMP25]](i8* [[TMP24]], %struct.S** [[DOTFIRSTPRIV_PTR_ADDR_I]], i32** [[DOTFIRSTPRIV_PTR_ADDR1_I]], [2 x %struct.S]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], [2 x i32]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], i32** [[DOTFIRSTPRIV_PTR_ADDR4_I]]) #[[ATTR4]]
466 // CHECK-NEXT:    [[TMP26:%.*]] = load %struct.S*, %struct.S** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !14
467 // CHECK-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !14
468 // CHECK-NEXT:    [[TMP28:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !14
469 // CHECK-NEXT:    [[TMP29:%.*]] = load [2 x i32]*, [2 x i32]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !14
470 // CHECK-NEXT:    [[TMP30:%.*]] = load i32*, i32** [[DOTFIRSTPRIV_PTR_ADDR4_I]], align 8, !noalias !14
471 // CHECK-NEXT:    [[TMP31:%.*]] = load i64, i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
472 // CHECK-NEXT:    [[CONV_I:%.*]] = trunc i64 [[TMP31]] to i32
473 // CHECK-NEXT:    store i32 [[CONV_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14
474 // CHECK-NEXT:    br label [[OMP_INNER_FOR_COND_I:%.*]]
475 // CHECK:       omp.inner.for.cond.i:
476 // CHECK-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14
477 // CHECK-NEXT:    [[CONV5_I:%.*]] = sext i32 [[TMP32]] to i64
478 // CHECK-NEXT:    [[TMP33:%.*]] = load i64, i64* [[DOTUB__ADDR_I]], align 8, !noalias !14
479 // CHECK-NEXT:    [[CMP_I:%.*]] = icmp ule i64 [[CONV5_I]], [[TMP33]]
480 // CHECK-NEXT:    br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__EXIT:%.*]]
481 // CHECK:       omp.inner.for.body.i:
482 // CHECK-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14
483 // CHECK-NEXT:    store i32 [[TMP34]], i32* [[I_I]], align 4, !noalias !14
484 // CHECK-NEXT:    [[TMP35:%.*]] = load i32, i32* [[TMP27]], align 4
485 // CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP29]], i64 0, i64 0
486 // CHECK-NEXT:    store i32 [[TMP35]], i32* [[ARRAYIDX_I]], align 4
487 // CHECK-NEXT:    [[ARRAYIDX6_I:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP28]], i64 0, i64 0
488 // CHECK-NEXT:    [[TMP36:%.*]] = bitcast %struct.S* [[ARRAYIDX6_I]] to i8*
489 // CHECK-NEXT:    [[TMP37:%.*]] = bitcast %struct.S* [[TMP26]] to i8*
490 // CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP36]], i8* align 8 [[TMP37]], i64 8, i1 false)
491 // CHECK-NEXT:    store i32 33, i32* [[TMP30]], align 4
492 // CHECK-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14
493 // CHECK-NEXT:    [[ADD7_I:%.*]] = add nsw i32 [[TMP38]], 1
494 // CHECK-NEXT:    store i32 [[ADD7_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14
495 // CHECK-NEXT:    br label [[OMP_INNER_FOR_COND_I]]
496 // CHECK:       .omp_outlined..exit:
497 // CHECK-NEXT:    ret i32 0
498 //
499 //
500 // CHECK-LABEL: define {{[^@]+}}@.omp_task_dup.
501 // CHECK-SAME: (%struct.kmp_task_t_with_privates* noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noundef [[TMP1:%.*]], i32 noundef [[TMP2:%.*]]) #[[ATTR7]] {
502 // CHECK-NEXT:  entry:
503 // CHECK-NEXT:    [[DOTADDR:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
504 // CHECK-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
505 // CHECK-NEXT:    [[DOTADDR2:%.*]] = alloca i32, align 4
506 // CHECK-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP0]], %struct.kmp_task_t_with_privates** [[DOTADDR]], align 8
507 // CHECK-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
508 // CHECK-NEXT:    store i32 [[TMP2]], i32* [[DOTADDR2]], align 4
509 // CHECK-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR]], align 8
510 // CHECK-NEXT:    [[TMP4:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
511 // CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP4]], i32 0, i32 0
512 // CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP5]], i32 0, i32 0
513 // CHECK-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
514 // CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
515 // CHECK-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
516 // CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP8]], i32 0, i32 0
517 // CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP9]], i32 0, i32 0
518 // CHECK-NEXT:    [[TMP12:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[TMP11]], align 8
519 // CHECK-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP10]], i32 0, i32 0
520 // CHECK-NEXT:    [[TMP13:%.*]] = bitcast [2 x %struct.S]* [[TMP12]] to %struct.S*
521 // CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 2
522 // CHECK-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN]], [[TMP14]]
523 // CHECK-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE3:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
524 // CHECK:       omp.arraycpy.body:
525 // CHECK-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP13]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
526 // CHECK-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
527 // CHECK-NEXT:    call void @_ZN1SIdEC1ERKS0_d(%struct.S* noundef nonnull align 8 dereferenceable(8) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[OMP_ARRAYCPY_SRCELEMENTPAST]], double noundef 0.000000e+00)
528 // CHECK-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
529 // CHECK-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
530 // CHECK-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP14]]
531 // CHECK-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE3]], label [[OMP_ARRAYCPY_BODY]]
532 // CHECK:       omp.arraycpy.done3:
533 // CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP8]], i32 0, i32 1
534 // CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP9]], i32 0, i32 1
535 // CHECK-NEXT:    [[TMP17:%.*]] = load %struct.S*, %struct.S** [[TMP16]], align 8
536 // CHECK-NEXT:    call void @_ZN1SIdEC1ERKS0_d(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TMP15]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[TMP17]], double noundef 0.000000e+00)
537 // CHECK-NEXT:    ret void
538 //
539 //
540 // CHECK-LABEL: define {{[^@]+}}@.omp_task_destructor.
541 // CHECK-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR7]] {
542 // CHECK-NEXT:  entry:
543 // CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
544 // CHECK-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
545 // CHECK-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
546 // CHECK-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
547 // CHECK-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
548 // CHECK-NEXT:    [[TMP2:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
549 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP2]], i32 0, i32 1
550 // CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 0
551 // CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 1
552 // CHECK-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TMP5]]) #[[ATTR4]]
553 // CHECK-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP4]], i32 0, i32 0
554 // CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 2
555 // CHECK-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
556 // CHECK:       arraydestroy.body:
557 // CHECK-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP6]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
558 // CHECK-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
559 // CHECK-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
560 // CHECK-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
561 // CHECK-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
562 // CHECK:       arraydestroy.done2:
563 // CHECK-NEXT:    [[TMP7:%.*]] = load i32, i32* [[RETVAL]], align 4
564 // CHECK-NEXT:    ret i32 [[TMP7]]
565 //
566 //
567 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIdED1Ev
568 // CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
569 // CHECK-NEXT:  entry:
570 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
571 // CHECK-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
572 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
573 // CHECK-NEXT:    call void @_ZN1SIdED2Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]]) #[[ATTR4]]
574 // CHECK-NEXT:    ret void
575 //
576 //
577 // CHECK-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
578 // CHECK-SAME: () #[[ATTR9:[0-9]+]] {
579 // CHECK-NEXT:  entry:
580 // CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
581 // CHECK-NEXT:    [[TTT:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
582 // CHECK-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0]], align 4
583 // CHECK-NEXT:    [[T_VAR:%.*]] = alloca i32, align 128
584 // CHECK-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
585 // CHECK-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
586 // CHECK-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S_0]], align 4
587 // CHECK-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i64, align 8
588 // CHECK-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TTT]])
589 // CHECK-NEXT:    call void @_ZN1SIiEC1ERKS0_i(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TTT]], i32 noundef 0)
590 // CHECK-NEXT:    store i32 0, i32* [[T_VAR]], align 128
591 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
592 // CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
593 // CHECK-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
594 // CHECK-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 noundef 1)
595 // CHECK-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
596 // CHECK-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
597 // CHECK-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]], i32 noundef 3)
598 // CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[T_VAR]], align 128
599 // CHECK-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_CASTED]] to i32*
600 // CHECK-NEXT:    store i32 [[TMP1]], i32* [[CONV]], align 4
601 // CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* [[T_VAR_CASTED]], align 8
602 // CHECK-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i64, [2 x %struct.S.0]*, %struct.S.0*)* @_Z5tmainIiET_v.omp_outlined to void (i32*, i32*, ...)*), [2 x i32]* [[VEC]], i64 [[TMP2]], [2 x %struct.S.0]* [[S_ARR]], %struct.S.0* [[VAR]])
603 // CHECK-NEXT:    store i32 0, i32* [[RETVAL]], align 4
604 // CHECK-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
605 // CHECK-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
606 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
607 // CHECK-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
608 // CHECK:       arraydestroy.body:
609 // CHECK-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP3]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
610 // CHECK-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
611 // CHECK-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
612 // CHECK-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
613 // CHECK-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
614 // CHECK:       arraydestroy.done1:
615 // CHECK-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
616 // CHECK-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TTT]]) #[[ATTR4]]
617 // CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[RETVAL]], align 4
618 // CHECK-NEXT:    ret i32 [[TMP4]]
619 //
620 //
621 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIdEC2Ev
622 // CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
623 // CHECK-NEXT:  entry:
624 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
625 // CHECK-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
626 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
627 // CHECK-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
628 // CHECK-NEXT:    store double 0.000000e+00, double* [[F]], align 8
629 // CHECK-NEXT:    ret void
630 //
631 //
632 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIdEC2ERKS0_d
633 // CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[S:%.*]], double noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
634 // CHECK-NEXT:  entry:
635 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
636 // CHECK-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S*, align 8
637 // CHECK-NEXT:    [[T_ADDR:%.*]] = alloca double, align 8
638 // CHECK-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
639 // CHECK-NEXT:    store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8
640 // CHECK-NEXT:    store double [[T]], double* [[T_ADDR]], align 8
641 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
642 // CHECK-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
643 // CHECK-NEXT:    [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8
644 // CHECK-NEXT:    [[F2:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 0
645 // CHECK-NEXT:    [[TMP1:%.*]] = load double, double* [[F2]], align 8
646 // CHECK-NEXT:    [[TMP2:%.*]] = load double, double* [[T_ADDR]], align 8
647 // CHECK-NEXT:    [[ADD:%.*]] = fadd double [[TMP1]], [[TMP2]]
648 // CHECK-NEXT:    store double [[ADD]], double* [[F]], align 8
649 // CHECK-NEXT:    ret void
650 //
651 //
652 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIdEC2Ed
653 // CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], double noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
654 // CHECK-NEXT:  entry:
655 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
656 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
657 // CHECK-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
658 // CHECK-NEXT:    store double [[A]], double* [[A_ADDR]], align 8
659 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
660 // CHECK-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
661 // CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[A_ADDR]], align 8
662 // CHECK-NEXT:    store double [[TMP0]], double* [[F]], align 8
663 // CHECK-NEXT:    ret void
664 //
665 //
666 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIdED2Ev
667 // CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
668 // CHECK-NEXT:  entry:
669 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
670 // CHECK-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
671 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
672 // CHECK-NEXT:    ret void
673 //
674 //
675 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
676 // CHECK-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
677 // CHECK-NEXT:  entry:
678 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
679 // CHECK-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
680 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
681 // CHECK-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
682 // CHECK-NEXT:    ret void
683 //
684 //
685 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIiEC1ERKS0_i
686 // CHECK-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], i32 noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
687 // CHECK-NEXT:  entry:
688 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
689 // CHECK-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8
690 // CHECK-NEXT:    [[T_ADDR:%.*]] = alloca i32, align 4
691 // CHECK-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
692 // CHECK-NEXT:    store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8
693 // CHECK-NEXT:    store i32 [[T]], i32* [[T_ADDR]], align 4
694 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
695 // CHECK-NEXT:    [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8
696 // CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[T_ADDR]], align 4
697 // CHECK-NEXT:    call void @_ZN1SIiEC2ERKS0_i(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP0]], i32 noundef [[TMP1]])
698 // CHECK-NEXT:    ret void
699 //
700 //
701 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
702 // CHECK-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
703 // CHECK-NEXT:  entry:
704 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
705 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
706 // CHECK-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
707 // CHECK-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
708 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
709 // CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
710 // CHECK-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]])
711 // CHECK-NEXT:    ret void
712 //
713 //
714 // CHECK-LABEL: define {{[^@]+}}@_Z5tmainIiET_v.omp_outlined
715 // CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [2 x i32]* noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]], i64 noundef [[T_VAR:%.*]], [2 x %struct.S.0]* noundef nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR3]] {
716 // CHECK-NEXT:  entry:
717 // CHECK-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
718 // CHECK-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
719 // CHECK-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
720 // CHECK-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i64, align 8
721 // CHECK-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8
722 // CHECK-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8
723 // CHECK-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 8
724 // CHECK-NEXT:    [[TMP:%.*]] = alloca i32, align 4
725 // CHECK-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
726 // CHECK-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
727 // CHECK-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
728 // CHECK-NEXT:    store i64 [[T_VAR]], i64* [[T_VAR_ADDR]], align 8
729 // CHECK-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
730 // CHECK-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8
731 // CHECK-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
732 // CHECK-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_ADDR]] to i32*
733 // CHECK-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
734 // CHECK-NEXT:    [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8
735 // CHECK-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
736 // CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
737 // CHECK-NEXT:    [[TMP5:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
738 // CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
739 // CHECK-NEXT:    br i1 [[TMP6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
740 // CHECK:       omp_if.then:
741 // CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[AGG_CAPTURED]], i32 0, i32 0
742 // CHECK-NEXT:    store [2 x %struct.S.0]* [[TMP1]], [2 x %struct.S.0]** [[TMP7]], align 8
743 // CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[AGG_CAPTURED]], i32 0, i32 1
744 // CHECK-NEXT:    store %struct.S.0* [[TMP2]], %struct.S.0** [[TMP8]], align 8
745 // CHECK-NEXT:    call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
746 // CHECK-NEXT:    [[TMP9:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i32 9, i64 256, i64 16, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates.2*)* @.omp_task_entry..3 to i32 (i32, i8*)*))
747 // CHECK-NEXT:    [[TMP10:%.*]] = bitcast i8* [[TMP9]] to %struct.kmp_task_t_with_privates.2*
748 // CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2:%.*]], %struct.kmp_task_t_with_privates.2* [[TMP10]], i32 0, i32 0
749 // CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 0
750 // CHECK-NEXT:    [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 128
751 // CHECK-NEXT:    [[TMP14:%.*]] = bitcast %struct.anon.1* [[AGG_CAPTURED]] to i8*
752 // CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP13]], i8* align 8 [[TMP14]], i64 16, i1 false)
753 // CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2]], %struct.kmp_task_t_with_privates.2* [[TMP10]], i32 0, i32 2
754 // CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3:%.*]], %struct..kmp_privates.t.3* [[TMP15]], i32 0, i32 0
755 // CHECK-NEXT:    [[TMP17:%.*]] = load i32, i32* [[CONV]], align 4
756 // CHECK-NEXT:    store i32 [[TMP17]], i32* [[TMP16]], align 128
757 // CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3]], %struct..kmp_privates.t.3* [[TMP15]], i32 0, i32 1
758 // CHECK-NEXT:    [[TMP19:%.*]] = bitcast [2 x i32]* [[TMP18]] to i8*
759 // CHECK-NEXT:    [[TMP20:%.*]] = bitcast [2 x i32]* [[TMP0]] to i8*
760 // CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP19]], i8* align 4 [[TMP20]], i64 8, i1 false)
761 // CHECK-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3]], %struct..kmp_privates.t.3* [[TMP15]], i32 0, i32 2
762 // CHECK-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP21]], i32 0, i32 0
763 // CHECK-NEXT:    [[TMP22:%.*]] = bitcast [2 x %struct.S.0]* [[TMP1]] to %struct.S.0*
764 // CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
765 // CHECK-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN]], [[TMP23]]
766 // CHECK-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE1:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
767 // CHECK:       omp.arraycpy.body:
768 // CHECK-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP22]], [[OMP_IF_THEN]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
769 // CHECK-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[OMP_IF_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
770 // CHECK-NEXT:    call void @_ZN1SIiEC1ERKS0_i(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 noundef 0)
771 // CHECK-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
772 // CHECK-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
773 // CHECK-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP23]]
774 // CHECK-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE1]], label [[OMP_ARRAYCPY_BODY]]
775 // CHECK:       omp.arraycpy.done1:
776 // CHECK-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3]], %struct..kmp_privates.t.3* [[TMP15]], i32 0, i32 3
777 // CHECK-NEXT:    call void @_ZN1SIiEC1ERKS0_i(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP24]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP2]], i32 noundef 0)
778 // CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 3
779 // CHECK-NEXT:    [[TMP26:%.*]] = bitcast %union.kmp_cmplrdata_t* [[TMP25]] to i32 (i32, i8*)**
780 // CHECK-NEXT:    store i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates.2*)* @.omp_task_destructor..5 to i32 (i32, i8*)*), i32 (i32, i8*)** [[TMP26]], align 8
781 // CHECK-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 5
782 // CHECK-NEXT:    store i64 0, i64* [[TMP27]], align 8
783 // CHECK-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 6
784 // CHECK-NEXT:    store i64 9, i64* [[TMP28]], align 16
785 // CHECK-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 7
786 // CHECK-NEXT:    store i64 1, i64* [[TMP29]], align 8
787 // CHECK-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 9
788 // CHECK-NEXT:    [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i8*
789 // CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP31]], i8 0, i64 8, i1 false)
790 // CHECK-NEXT:    [[TMP32:%.*]] = load i64, i64* [[TMP29]], align 8
791 // CHECK-NEXT:    call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i8* [[TMP9]], i32 1, i64* [[TMP27]], i64* [[TMP28]], i64 [[TMP32]], i32 1, i32 0, i64 0, i8* bitcast (void (%struct.kmp_task_t_with_privates.2*, %struct.kmp_task_t_with_privates.2*, i32)* @.omp_task_dup..4 to i8*))
792 // CHECK-NEXT:    call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
793 // CHECK-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
794 // CHECK-NEXT:    br label [[OMP_IF_END]]
795 // CHECK:       omp_if.end:
796 // CHECK-NEXT:    ret void
797 //
798 //
799 // CHECK-LABEL: define {{[^@]+}}@.omp_task_privates_map..2
800 // CHECK-SAME: (%struct..kmp_privates.t.3* noalias noundef [[TMP0:%.*]], i32** noalias noundef [[TMP1:%.*]], [2 x i32]** noalias noundef [[TMP2:%.*]], [2 x %struct.S.0]** noalias noundef [[TMP3:%.*]], %struct.S.0** noalias noundef [[TMP4:%.*]]) #[[ATTR6]] {
801 // CHECK-NEXT:  entry:
802 // CHECK-NEXT:    [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t.3*, align 8
803 // CHECK-NEXT:    [[DOTADDR1:%.*]] = alloca i32**, align 8
804 // CHECK-NEXT:    [[DOTADDR2:%.*]] = alloca [2 x i32]**, align 8
805 // CHECK-NEXT:    [[DOTADDR3:%.*]] = alloca [2 x %struct.S.0]**, align 8
806 // CHECK-NEXT:    [[DOTADDR4:%.*]] = alloca %struct.S.0**, align 8
807 // CHECK-NEXT:    store %struct..kmp_privates.t.3* [[TMP0]], %struct..kmp_privates.t.3** [[DOTADDR]], align 8
808 // CHECK-NEXT:    store i32** [[TMP1]], i32*** [[DOTADDR1]], align 8
809 // CHECK-NEXT:    store [2 x i32]** [[TMP2]], [2 x i32]*** [[DOTADDR2]], align 8
810 // CHECK-NEXT:    store [2 x %struct.S.0]** [[TMP3]], [2 x %struct.S.0]*** [[DOTADDR3]], align 8
811 // CHECK-NEXT:    store %struct.S.0** [[TMP4]], %struct.S.0*** [[DOTADDR4]], align 8
812 // CHECK-NEXT:    [[TMP5:%.*]] = load %struct..kmp_privates.t.3*, %struct..kmp_privates.t.3** [[DOTADDR]], align 8
813 // CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3:%.*]], %struct..kmp_privates.t.3* [[TMP5]], i32 0, i32 0
814 // CHECK-NEXT:    [[TMP7:%.*]] = load i32**, i32*** [[DOTADDR1]], align 8
815 // CHECK-NEXT:    store i32* [[TMP6]], i32** [[TMP7]], align 8
816 // CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3]], %struct..kmp_privates.t.3* [[TMP5]], i32 0, i32 1
817 // CHECK-NEXT:    [[TMP9:%.*]] = load [2 x i32]**, [2 x i32]*** [[DOTADDR2]], align 8
818 // CHECK-NEXT:    store [2 x i32]* [[TMP8]], [2 x i32]** [[TMP9]], align 8
819 // CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3]], %struct..kmp_privates.t.3* [[TMP5]], i32 0, i32 2
820 // CHECK-NEXT:    [[TMP11:%.*]] = load [2 x %struct.S.0]**, [2 x %struct.S.0]*** [[DOTADDR3]], align 8
821 // CHECK-NEXT:    store [2 x %struct.S.0]* [[TMP10]], [2 x %struct.S.0]** [[TMP11]], align 8
822 // CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3]], %struct..kmp_privates.t.3* [[TMP5]], i32 0, i32 3
823 // CHECK-NEXT:    [[TMP13:%.*]] = load %struct.S.0**, %struct.S.0*** [[DOTADDR4]], align 8
824 // CHECK-NEXT:    store %struct.S.0* [[TMP12]], %struct.S.0** [[TMP13]], align 8
825 // CHECK-NEXT:    ret void
826 //
827 //
828 // CHECK-LABEL: define {{[^@]+}}@.omp_task_entry..3
829 // CHECK-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates.2* noalias noundef [[TMP1:%.*]]) #[[ATTR7]] {
830 // CHECK-NEXT:  entry:
831 // CHECK-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
832 // CHECK-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
833 // CHECK-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
834 // CHECK-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
835 // CHECK-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
836 // CHECK-NEXT:    [[DOTLB__ADDR_I:%.*]] = alloca i64, align 8
837 // CHECK-NEXT:    [[DOTUB__ADDR_I:%.*]] = alloca i64, align 8
838 // CHECK-NEXT:    [[DOTST__ADDR_I:%.*]] = alloca i64, align 8
839 // CHECK-NEXT:    [[DOTLITER__ADDR_I:%.*]] = alloca i32, align 4
840 // CHECK-NEXT:    [[DOTREDUCTIONS__ADDR_I:%.*]] = alloca i8*, align 8
841 // CHECK-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.1*, align 8
842 // CHECK-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i32*, align 8
843 // CHECK-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca [2 x i32]*, align 8
844 // CHECK-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR2_I:%.*]] = alloca [2 x %struct.S.0]*, align 8
845 // CHECK-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR3_I:%.*]] = alloca %struct.S.0*, align 8
846 // CHECK-NEXT:    [[I_I:%.*]] = alloca i32, align 4
847 // CHECK-NEXT:    [[DOTOMP_IV_I:%.*]] = alloca i32, align 4
848 // CHECK-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
849 // CHECK-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates.2*, align 8
850 // CHECK-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
851 // CHECK-NEXT:    store %struct.kmp_task_t_with_privates.2* [[TMP1]], %struct.kmp_task_t_with_privates.2** [[DOTADDR1]], align 8
852 // CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
853 // CHECK-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates.2*, %struct.kmp_task_t_with_privates.2** [[DOTADDR1]], align 8
854 // CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2:%.*]], %struct.kmp_task_t_with_privates.2* [[TMP3]], i32 0, i32 0
855 // CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
856 // CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
857 // CHECK-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 128
858 // CHECK-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.1*
859 // CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2]], %struct.kmp_task_t_with_privates.2* [[TMP3]], i32 0, i32 2
860 // CHECK-NEXT:    [[TMP10:%.*]] = bitcast %struct..kmp_privates.t.3* [[TMP9]] to i8*
861 // CHECK-NEXT:    [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates.2* [[TMP3]] to i8*
862 // CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 5
863 // CHECK-NEXT:    [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8
864 // CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 6
865 // CHECK-NEXT:    [[TMP15:%.*]] = load i64, i64* [[TMP14]], align 16
866 // CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 7
867 // CHECK-NEXT:    [[TMP17:%.*]] = load i64, i64* [[TMP16]], align 8
868 // CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 8
869 // CHECK-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 64
870 // CHECK-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 9
871 // CHECK-NEXT:    [[TMP21:%.*]] = load i8*, i8** [[TMP20]], align 8
872 // CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META17:![0-9]+]])
873 // CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META20:![0-9]+]])
874 // CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META22:![0-9]+]])
875 // CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]])
876 // CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META26:![0-9]+]])
877 // CHECK-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !28
878 // CHECK-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !28
879 // CHECK-NEXT:    store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !28
880 // CHECK-NEXT:    store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t.3*, i32**, [2 x i32]**, [2 x %struct.S.0]**, %struct.S.0**)* @.omp_task_privates_map..2 to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !28
881 // CHECK-NEXT:    store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !28
882 // CHECK-NEXT:    store i64 [[TMP13]], i64* [[DOTLB__ADDR_I]], align 8, !noalias !28
883 // CHECK-NEXT:    store i64 [[TMP15]], i64* [[DOTUB__ADDR_I]], align 8, !noalias !28
884 // CHECK-NEXT:    store i64 [[TMP17]], i64* [[DOTST__ADDR_I]], align 8, !noalias !28
885 // CHECK-NEXT:    store i32 [[TMP19]], i32* [[DOTLITER__ADDR_I]], align 4, !noalias !28
886 // CHECK-NEXT:    store i8* [[TMP21]], i8** [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !28
887 // CHECK-NEXT:    store %struct.anon.1* [[TMP8]], %struct.anon.1** [[__CONTEXT_ADDR_I]], align 8, !noalias !28
888 // CHECK-NEXT:    [[TMP22:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR_I]], align 8, !noalias !28
889 // CHECK-NEXT:    [[TMP23:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !28
890 // CHECK-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !28
891 // CHECK-NEXT:    [[TMP25:%.*]] = bitcast void (i8*, ...)* [[TMP23]] to void (i8*, i32**, [2 x i32]**, [2 x %struct.S.0]**, %struct.S.0**)*
892 // CHECK-NEXT:    call void [[TMP25]](i8* [[TMP24]], i32** [[DOTFIRSTPRIV_PTR_ADDR_I]], [2 x i32]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], [2 x %struct.S.0]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], %struct.S.0** [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]]
893 // CHECK-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !28
894 // CHECK-NEXT:    [[TMP27:%.*]] = load [2 x i32]*, [2 x i32]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !28
895 // CHECK-NEXT:    [[TMP28:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !28
896 // CHECK-NEXT:    [[TMP29:%.*]] = load %struct.S.0*, %struct.S.0** [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !28
897 // CHECK-NEXT:    [[TMP30:%.*]] = load i64, i64* [[DOTLB__ADDR_I]], align 8, !noalias !28
898 // CHECK-NEXT:    [[CONV_I:%.*]] = trunc i64 [[TMP30]] to i32
899 // CHECK-NEXT:    store i32 [[CONV_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !28
900 // CHECK-NEXT:    br label [[OMP_INNER_FOR_COND_I:%.*]]
901 // CHECK:       omp.inner.for.cond.i:
902 // CHECK-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !28
903 // CHECK-NEXT:    [[CONV4_I:%.*]] = sext i32 [[TMP31]] to i64
904 // CHECK-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTUB__ADDR_I]], align 8, !noalias !28
905 // CHECK-NEXT:    [[CMP_I:%.*]] = icmp ule i64 [[CONV4_I]], [[TMP32]]
906 // CHECK-NEXT:    br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
907 // CHECK:       omp.inner.for.body.i:
908 // CHECK-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !28
909 // CHECK-NEXT:    store i32 [[TMP33]], i32* [[I_I]], align 4, !noalias !28
910 // CHECK-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP26]], align 128
911 // CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP27]], i64 0, i64 0
912 // CHECK-NEXT:    store i32 [[TMP34]], i32* [[ARRAYIDX_I]], align 4
913 // CHECK-NEXT:    [[ARRAYIDX5_I:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP28]], i64 0, i64 0
914 // CHECK-NEXT:    [[TMP35:%.*]] = bitcast %struct.S.0* [[ARRAYIDX5_I]] to i8*
915 // CHECK-NEXT:    [[TMP36:%.*]] = bitcast %struct.S.0* [[TMP29]] to i8*
916 // CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false)
917 // CHECK-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !28
918 // CHECK-NEXT:    [[ADD6_I:%.*]] = add nsw i32 [[TMP37]], 1
919 // CHECK-NEXT:    store i32 [[ADD6_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !28
920 // CHECK-NEXT:    br label [[OMP_INNER_FOR_COND_I]]
921 // CHECK:       .omp_outlined..1.exit:
922 // CHECK-NEXT:    ret i32 0
923 //
924 //
925 // CHECK-LABEL: define {{[^@]+}}@.omp_task_dup..4
926 // CHECK-SAME: (%struct.kmp_task_t_with_privates.2* noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates.2* noundef [[TMP1:%.*]], i32 noundef [[TMP2:%.*]]) #[[ATTR7]] {
927 // CHECK-NEXT:  entry:
928 // CHECK-NEXT:    [[DOTADDR:%.*]] = alloca %struct.kmp_task_t_with_privates.2*, align 8
929 // CHECK-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates.2*, align 8
930 // CHECK-NEXT:    [[DOTADDR2:%.*]] = alloca i32, align 4
931 // CHECK-NEXT:    store %struct.kmp_task_t_with_privates.2* [[TMP0]], %struct.kmp_task_t_with_privates.2** [[DOTADDR]], align 8
932 // CHECK-NEXT:    store %struct.kmp_task_t_with_privates.2* [[TMP1]], %struct.kmp_task_t_with_privates.2** [[DOTADDR1]], align 8
933 // CHECK-NEXT:    store i32 [[TMP2]], i32* [[DOTADDR2]], align 4
934 // CHECK-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates.2*, %struct.kmp_task_t_with_privates.2** [[DOTADDR]], align 8
935 // CHECK-NEXT:    [[TMP4:%.*]] = load %struct.kmp_task_t_with_privates.2*, %struct.kmp_task_t_with_privates.2** [[DOTADDR1]], align 8
936 // CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2:%.*]], %struct.kmp_task_t_with_privates.2* [[TMP4]], i32 0, i32 0
937 // CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP5]], i32 0, i32 0
938 // CHECK-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 128
939 // CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2]], %struct.kmp_task_t_with_privates.2* [[TMP3]], i32 0, i32 2
940 // CHECK-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.1*
941 // CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3:%.*]], %struct..kmp_privates.t.3* [[TMP8]], i32 0, i32 2
942 // CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_ANON_1:%.*]], %struct.anon.1* [[TMP9]], i32 0, i32 0
943 // CHECK-NEXT:    [[TMP12:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[TMP11]], align 8
944 // CHECK-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP10]], i32 0, i32 0
945 // CHECK-NEXT:    [[TMP13:%.*]] = bitcast [2 x %struct.S.0]* [[TMP12]] to %struct.S.0*
946 // CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
947 // CHECK-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN]], [[TMP14]]
948 // CHECK-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE3:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
949 // CHECK:       omp.arraycpy.body:
950 // CHECK-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP13]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
951 // CHECK-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
952 // CHECK-NEXT:    call void @_ZN1SIiEC1ERKS0_i(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 noundef 0)
953 // CHECK-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
954 // CHECK-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
955 // CHECK-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP14]]
956 // CHECK-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE3]], label [[OMP_ARRAYCPY_BODY]]
957 // CHECK:       omp.arraycpy.done3:
958 // CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3]], %struct..kmp_privates.t.3* [[TMP8]], i32 0, i32 3
959 // CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP9]], i32 0, i32 1
960 // CHECK-NEXT:    [[TMP17:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP16]], align 8
961 // CHECK-NEXT:    call void @_ZN1SIiEC1ERKS0_i(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP15]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP17]], i32 noundef 0)
962 // CHECK-NEXT:    ret void
963 //
964 //
965 // CHECK-LABEL: define {{[^@]+}}@.omp_task_destructor..5
966 // CHECK-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates.2* noalias noundef [[TMP1:%.*]]) #[[ATTR7]] {
967 // CHECK-NEXT:  entry:
968 // CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
969 // CHECK-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
970 // CHECK-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates.2*, align 8
971 // CHECK-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
972 // CHECK-NEXT:    store %struct.kmp_task_t_with_privates.2* [[TMP1]], %struct.kmp_task_t_with_privates.2** [[DOTADDR1]], align 8
973 // CHECK-NEXT:    [[TMP2:%.*]] = load %struct.kmp_task_t_with_privates.2*, %struct.kmp_task_t_with_privates.2** [[DOTADDR1]], align 8
974 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2:%.*]], %struct.kmp_task_t_with_privates.2* [[TMP2]], i32 0, i32 2
975 // CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3:%.*]], %struct..kmp_privates.t.3* [[TMP3]], i32 0, i32 2
976 // CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3]], %struct..kmp_privates.t.3* [[TMP3]], i32 0, i32 3
977 // CHECK-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP5]]) #[[ATTR4]]
978 // CHECK-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP4]], i32 0, i32 0
979 // CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
980 // CHECK-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
981 // CHECK:       arraydestroy.body:
982 // CHECK-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP6]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
983 // CHECK-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
984 // CHECK-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
985 // CHECK-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
986 // CHECK-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
987 // CHECK:       arraydestroy.done2:
988 // CHECK-NEXT:    [[TMP7:%.*]] = load i32, i32* [[RETVAL]], align 4
989 // CHECK-NEXT:    ret i32 [[TMP7]]
990 //
991 //
992 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
993 // CHECK-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
994 // CHECK-NEXT:  entry:
995 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
996 // CHECK-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
997 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
998 // CHECK-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
999 // CHECK-NEXT:    ret void
1000 //
1001 //
1002 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
1003 // CHECK-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1004 // CHECK-NEXT:  entry:
1005 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1006 // CHECK-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1007 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1008 // CHECK-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1009 // CHECK-NEXT:    store i32 0, i32* [[F]], align 4
1010 // CHECK-NEXT:    ret void
1011 //
1012 //
1013 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIiEC2ERKS0_i
1014 // CHECK-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], i32 noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1015 // CHECK-NEXT:  entry:
1016 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1017 // CHECK-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8
1018 // CHECK-NEXT:    [[T_ADDR:%.*]] = alloca i32, align 4
1019 // CHECK-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1020 // CHECK-NEXT:    store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8
1021 // CHECK-NEXT:    store i32 [[T]], i32* [[T_ADDR]], align 4
1022 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1023 // CHECK-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1024 // CHECK-NEXT:    [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8
1025 // CHECK-NEXT:    [[F2:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[TMP0]], i32 0, i32 0
1026 // CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[F2]], align 4
1027 // CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[T_ADDR]], align 4
1028 // CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP2]]
1029 // CHECK-NEXT:    store i32 [[ADD]], i32* [[F]], align 4
1030 // CHECK-NEXT:    ret void
1031 //
1032 //
1033 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
1034 // CHECK-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1035 // CHECK-NEXT:  entry:
1036 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1037 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1038 // CHECK-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1039 // CHECK-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1040 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1041 // CHECK-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1042 // CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
1043 // CHECK-NEXT:    store i32 [[TMP0]], i32* [[F]], align 4
1044 // CHECK-NEXT:    ret void
1045 //
1046 //
1047 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
1048 // CHECK-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1049 // CHECK-NEXT:  entry:
1050 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1051 // CHECK-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1052 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1053 // CHECK-NEXT:    ret void
1054 //
1055 //
1056 // LAMBDA-LABEL: define {{[^@]+}}@main
1057 // LAMBDA-SAME: () #[[ATTR0:[0-9]+]] {
1058 // LAMBDA-NEXT:  entry:
1059 // LAMBDA-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1060 // LAMBDA-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
1061 // LAMBDA-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1062 // LAMBDA-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 1 dereferenceable(1) [[REF_TMP]])
1063 // LAMBDA-NEXT:    ret i32 0
1064 //
1065 //
1066 // LAMBDA-LABEL: define {{[^@]+}}@.omp_task_privates_map.
1067 // LAMBDA-SAME: (%struct..kmp_privates.t* noalias noundef [[TMP0:%.*]], double** noalias noundef [[TMP1:%.*]], i32** noalias noundef [[TMP2:%.*]]) #[[ATTR5:[0-9]+]] {
1068 // LAMBDA-NEXT:  entry:
1069 // LAMBDA-NEXT:    [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8
1070 // LAMBDA-NEXT:    [[DOTADDR1:%.*]] = alloca double**, align 8
1071 // LAMBDA-NEXT:    [[DOTADDR2:%.*]] = alloca i32**, align 8
1072 // LAMBDA-NEXT:    store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8
1073 // LAMBDA-NEXT:    store double** [[TMP1]], double*** [[DOTADDR1]], align 8
1074 // LAMBDA-NEXT:    store i32** [[TMP2]], i32*** [[DOTADDR2]], align 8
1075 // LAMBDA-NEXT:    [[TMP3:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8
1076 // LAMBDA-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 0
1077 // LAMBDA-NEXT:    [[TMP5:%.*]] = load double**, double*** [[DOTADDR1]], align 8
1078 // LAMBDA-NEXT:    store double* [[TMP4]], double** [[TMP5]], align 8
1079 // LAMBDA-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 1
1080 // LAMBDA-NEXT:    [[TMP7:%.*]] = load i32**, i32*** [[DOTADDR2]], align 8
1081 // LAMBDA-NEXT:    store i32* [[TMP6]], i32** [[TMP7]], align 8
1082 // LAMBDA-NEXT:    ret void
1083 //
1084 //
1085 // LAMBDA-LABEL: define {{[^@]+}}@.omp_task_entry.
1086 // LAMBDA-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR6:[0-9]+]] {
1087 // LAMBDA-NEXT:  entry:
1088 // LAMBDA-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
1089 // LAMBDA-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
1090 // LAMBDA-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
1091 // LAMBDA-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
1092 // LAMBDA-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
1093 // LAMBDA-NEXT:    [[DOTLB__ADDR_I:%.*]] = alloca i64, align 8
1094 // LAMBDA-NEXT:    [[DOTUB__ADDR_I:%.*]] = alloca i64, align 8
1095 // LAMBDA-NEXT:    [[DOTST__ADDR_I:%.*]] = alloca i64, align 8
1096 // LAMBDA-NEXT:    [[DOTLITER__ADDR_I:%.*]] = alloca i32, align 4
1097 // LAMBDA-NEXT:    [[DOTREDUCTIONS__ADDR_I:%.*]] = alloca i8*, align 8
1098 // LAMBDA-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
1099 // LAMBDA-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca double*, align 8
1100 // LAMBDA-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca i32*, align 8
1101 // LAMBDA-NEXT:    [[I_I:%.*]] = alloca i32, align 4
1102 // LAMBDA-NEXT:    [[DOTOMP_IV_I:%.*]] = alloca i32, align 4
1103 // LAMBDA-NEXT:    [[REF_TMP_I:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
1104 // LAMBDA-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
1105 // LAMBDA-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
1106 // LAMBDA-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
1107 // LAMBDA-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
1108 // LAMBDA-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
1109 // LAMBDA-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
1110 // LAMBDA-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
1111 // LAMBDA-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
1112 // LAMBDA-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
1113 // LAMBDA-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
1114 // LAMBDA-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
1115 // LAMBDA-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
1116 // LAMBDA-NEXT:    [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
1117 // LAMBDA-NEXT:    [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
1118 // LAMBDA-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 5
1119 // LAMBDA-NEXT:    [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8
1120 // LAMBDA-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 6
1121 // LAMBDA-NEXT:    [[TMP15:%.*]] = load i64, i64* [[TMP14]], align 8
1122 // LAMBDA-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 7
1123 // LAMBDA-NEXT:    [[TMP17:%.*]] = load i64, i64* [[TMP16]], align 8
1124 // LAMBDA-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 8
1125 // LAMBDA-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 8
1126 // LAMBDA-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 9
1127 // LAMBDA-NEXT:    [[TMP21:%.*]] = load i8*, i8** [[TMP20]], align 8
1128 // LAMBDA-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
1129 // LAMBDA-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
1130 // LAMBDA-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]])
1131 // LAMBDA-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]])
1132 // LAMBDA-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]])
1133 // LAMBDA-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14
1134 // LAMBDA-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !14
1135 // LAMBDA-NEXT:    store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
1136 // LAMBDA-NEXT:    store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, double**, i32**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
1137 // LAMBDA-NEXT:    store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !14
1138 // LAMBDA-NEXT:    store i64 [[TMP13]], i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
1139 // LAMBDA-NEXT:    store i64 [[TMP15]], i64* [[DOTUB__ADDR_I]], align 8, !noalias !14
1140 // LAMBDA-NEXT:    store i64 [[TMP17]], i64* [[DOTST__ADDR_I]], align 8, !noalias !14
1141 // LAMBDA-NEXT:    store i32 [[TMP19]], i32* [[DOTLITER__ADDR_I]], align 4, !noalias !14
1142 // LAMBDA-NEXT:    store i8* [[TMP21]], i8** [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14
1143 // LAMBDA-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
1144 // LAMBDA-NEXT:    [[TMP22:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
1145 // LAMBDA-NEXT:    [[TMP23:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
1146 // LAMBDA-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
1147 // LAMBDA-NEXT:    [[TMP25:%.*]] = bitcast void (i8*, ...)* [[TMP23]] to void (i8*, double**, i32**)*
1148 // LAMBDA-NEXT:    call void [[TMP25]](i8* [[TMP24]], double** [[DOTFIRSTPRIV_PTR_ADDR_I]], i32** [[DOTFIRSTPRIV_PTR_ADDR1_I]]) #[[ATTR3:[0-9]+]]
1149 // LAMBDA-NEXT:    [[TMP26:%.*]] = load double*, double** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !14
1150 // LAMBDA-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !14
1151 // LAMBDA-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
1152 // LAMBDA-NEXT:    [[CONV_I:%.*]] = trunc i64 [[TMP28]] to i32
1153 // LAMBDA-NEXT:    store i32 [[CONV_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14
1154 // LAMBDA-NEXT:    br label [[OMP_INNER_FOR_COND_I:%.*]]
1155 // LAMBDA:       omp.inner.for.cond.i:
1156 // LAMBDA-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14
1157 // LAMBDA-NEXT:    [[CONV2_I:%.*]] = sext i32 [[TMP29]] to i64
1158 // LAMBDA-NEXT:    [[TMP30:%.*]] = load i64, i64* [[DOTUB__ADDR_I]], align 8, !noalias !14
1159 // LAMBDA-NEXT:    [[CMP_I:%.*]] = icmp ule i64 [[CONV2_I]], [[TMP30]]
1160 // LAMBDA-NEXT:    br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__EXIT:%.*]]
1161 // LAMBDA:       omp.inner.for.body.i:
1162 // LAMBDA-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14
1163 // LAMBDA-NEXT:    store i32 [[TMP31]], i32* [[I_I]], align 4, !noalias !14
1164 // LAMBDA-NEXT:    store double 1.000000e+00, double* [[TMP26]], align 8
1165 // LAMBDA-NEXT:    store i32 11, i32* [[TMP27]], align 4
1166 // LAMBDA-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP_I]], i32 0, i32 0
1167 // LAMBDA-NEXT:    store double* [[TMP26]], double** [[TMP32]], align 8, !noalias !14
1168 // LAMBDA-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP_I]], i32 0, i32 1
1169 // LAMBDA-NEXT:    store i32* [[TMP27]], i32** [[TMP33]], align 8, !noalias !14
1170 // LAMBDA-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* noundef nonnull align 8 dereferenceable(16) [[REF_TMP_I]])
1171 // LAMBDA-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14
1172 // LAMBDA-NEXT:    [[ADD3_I:%.*]] = add nsw i32 [[TMP34]], 1
1173 // LAMBDA-NEXT:    store i32 [[ADD3_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14
1174 // LAMBDA-NEXT:    br label [[OMP_INNER_FOR_COND_I]]
1175 // LAMBDA:       .omp_outlined..exit:
1176 // LAMBDA-NEXT:    ret i32 0
1177 //
1178 //
1179 // BLOCKS-LABEL: define {{[^@]+}}@main
1180 // BLOCKS-SAME: () #[[ATTR1:[0-9]+]] {
1181 // BLOCKS-NEXT:  entry:
1182 // BLOCKS-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1183 // BLOCKS-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1184 // BLOCKS-NEXT:    [[TMP0:%.*]] = load i8*, i8** getelementptr inbounds ([[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to %struct.__block_literal_generic*), i32 0, i32 3), align 8
1185 // BLOCKS-NEXT:    [[TMP1:%.*]] = bitcast i8* [[TMP0]] to void (i8*)*
1186 // BLOCKS-NEXT:    call void [[TMP1]](i8* noundef bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to i8*))
1187 // BLOCKS-NEXT:    ret i32 0
1188 //
1189 //
1190 // BLOCKS-LABEL: define {{[^@]+}}@__main_block_invoke
1191 // BLOCKS-SAME: (i8* noundef [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR2:[0-9]+]] {
1192 // BLOCKS-NEXT:  entry:
1193 // BLOCKS-NEXT:    [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
1194 // BLOCKS-NEXT:    [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*, align 8
1195 // BLOCKS-NEXT:    store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
1196 // BLOCKS-NEXT:    [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*
1197 // BLOCKS-NEXT:    store <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>** [[BLOCK_ADDR]], align 8
1198 // BLOCKS-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @__main_block_invoke.omp_outlined to void (i32*, i32*, ...)*))
1199 // BLOCKS-NEXT:    ret void
1200 //
1201 //
1202 // BLOCKS-LABEL: define {{[^@]+}}@__main_block_invoke.omp_outlined
1203 // BLOCKS-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
1204 // BLOCKS-NEXT:  entry:
1205 // BLOCKS-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1206 // BLOCKS-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1207 // BLOCKS-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
1208 // BLOCKS-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1209 // BLOCKS-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1210 // BLOCKS-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1211 // BLOCKS-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1212 // BLOCKS-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1213 // BLOCKS-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1214 // BLOCKS-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
1215 // BLOCKS-NEXT:    br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1216 // BLOCKS:       omp_if.then:
1217 // BLOCKS-NEXT:    call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1218 // BLOCKS-NEXT:    [[TMP4:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i64 96, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
1219 // BLOCKS-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to %struct.kmp_task_t_with_privates*
1220 // BLOCKS-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP5]], i32 0, i32 0
1221 // BLOCKS-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP5]], i32 0, i32 1
1222 // BLOCKS-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP7]], i32 0, i32 0
1223 // BLOCKS-NEXT:    [[TMP9:%.*]] = load volatile double, double* @g, align 8
1224 // BLOCKS-NEXT:    store volatile double [[TMP9]], double* [[TMP8]], align 8
1225 // BLOCKS-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP7]], i32 0, i32 1
1226 // BLOCKS-NEXT:    [[TMP11:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
1227 // BLOCKS-NEXT:    store i32 [[TMP11]], i32* [[TMP10]], align 8
1228 // BLOCKS-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP6]], i32 0, i32 5
1229 // BLOCKS-NEXT:    store i64 0, i64* [[TMP12]], align 8
1230 // BLOCKS-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP6]], i32 0, i32 6
1231 // BLOCKS-NEXT:    store i64 9, i64* [[TMP13]], align 8
1232 // BLOCKS-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP6]], i32 0, i32 7
1233 // BLOCKS-NEXT:    store i64 1, i64* [[TMP14]], align 8
1234 // BLOCKS-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP6]], i32 0, i32 9
1235 // BLOCKS-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i8*
1236 // BLOCKS-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP16]], i8 0, i64 8, i1 false)
1237 // BLOCKS-NEXT:    [[TMP17:%.*]] = load i64, i64* [[TMP14]], align 8
1238 // BLOCKS-NEXT:    call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* [[TMP4]], i32 1, i64* [[TMP12]], i64* [[TMP13]], i64 [[TMP17]], i32 1, i32 0, i64 0, i8* null)
1239 // BLOCKS-NEXT:    call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1240 // BLOCKS-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1241 // BLOCKS-NEXT:    br label [[OMP_IF_END]]
1242 // BLOCKS:       omp_if.end:
1243 // BLOCKS-NEXT:    ret void
1244 //
1245 //
1246 // BLOCKS-LABEL: define {{[^@]+}}@_block_invoke
1247 // BLOCKS-SAME: (i8* noundef [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR2]] {
1248 // BLOCKS-NEXT:  entry:
1249 // BLOCKS-NEXT:    [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
1250 // BLOCKS-NEXT:    [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>*, align 8
1251 // BLOCKS-NEXT:    store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
1252 // BLOCKS-NEXT:    [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>*
1253 // BLOCKS-NEXT:    store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>** [[BLOCK_ADDR]], align 8
1254 // BLOCKS-NEXT:    [[BLOCK_CAPTURE_ADDR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK]], i32 0, i32 5
1255 // BLOCKS-NEXT:    store double 2.000000e+00, double* [[BLOCK_CAPTURE_ADDR]], align 8
1256 // BLOCKS-NEXT:    [[BLOCK_CAPTURE_ADDR1:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK]], i32 0, i32 6
1257 // BLOCKS-NEXT:    store i32 22, i32* [[BLOCK_CAPTURE_ADDR1]], align 8
1258 // BLOCKS-NEXT:    ret void
1259 //
1260 //
1261 // BLOCKS-LABEL: define {{[^@]+}}@.omp_task_privates_map.
1262 // BLOCKS-SAME: (%struct..kmp_privates.t* noalias noundef [[TMP0:%.*]], double** noalias noundef [[TMP1:%.*]], i32** noalias noundef [[TMP2:%.*]]) #[[ATTR6:[0-9]+]] {
1263 // BLOCKS-NEXT:  entry:
1264 // BLOCKS-NEXT:    [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8
1265 // BLOCKS-NEXT:    [[DOTADDR1:%.*]] = alloca double**, align 8
1266 // BLOCKS-NEXT:    [[DOTADDR2:%.*]] = alloca i32**, align 8
1267 // BLOCKS-NEXT:    store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8
1268 // BLOCKS-NEXT:    store double** [[TMP1]], double*** [[DOTADDR1]], align 8
1269 // BLOCKS-NEXT:    store i32** [[TMP2]], i32*** [[DOTADDR2]], align 8
1270 // BLOCKS-NEXT:    [[TMP3:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8
1271 // BLOCKS-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 0
1272 // BLOCKS-NEXT:    [[TMP5:%.*]] = load double**, double*** [[DOTADDR1]], align 8
1273 // BLOCKS-NEXT:    store double* [[TMP4]], double** [[TMP5]], align 8
1274 // BLOCKS-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 1
1275 // BLOCKS-NEXT:    [[TMP7:%.*]] = load i32**, i32*** [[DOTADDR2]], align 8
1276 // BLOCKS-NEXT:    store i32* [[TMP6]], i32** [[TMP7]], align 8
1277 // BLOCKS-NEXT:    ret void
1278 //
1279 //
1280 // BLOCKS-LABEL: define {{[^@]+}}@.omp_task_entry.
1281 // BLOCKS-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR7:[0-9]+]] {
1282 // BLOCKS-NEXT:  entry:
1283 // BLOCKS-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
1284 // BLOCKS-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
1285 // BLOCKS-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
1286 // BLOCKS-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
1287 // BLOCKS-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
1288 // BLOCKS-NEXT:    [[DOTLB__ADDR_I:%.*]] = alloca i64, align 8
1289 // BLOCKS-NEXT:    [[DOTUB__ADDR_I:%.*]] = alloca i64, align 8
1290 // BLOCKS-NEXT:    [[DOTST__ADDR_I:%.*]] = alloca i64, align 8
1291 // BLOCKS-NEXT:    [[DOTLITER__ADDR_I:%.*]] = alloca i32, align 4
1292 // BLOCKS-NEXT:    [[DOTREDUCTIONS__ADDR_I:%.*]] = alloca i8*, align 8
1293 // BLOCKS-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
1294 // BLOCKS-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca double*, align 8
1295 // BLOCKS-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca i32*, align 8
1296 // BLOCKS-NEXT:    [[I_I:%.*]] = alloca i32, align 4
1297 // BLOCKS-NEXT:    [[DOTOMP_IV_I:%.*]] = alloca i32, align 4
1298 // BLOCKS-NEXT:    [[BLOCK_I:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, align 8
1299 // BLOCKS-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
1300 // BLOCKS-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
1301 // BLOCKS-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
1302 // BLOCKS-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
1303 // BLOCKS-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
1304 // BLOCKS-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
1305 // BLOCKS-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
1306 // BLOCKS-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
1307 // BLOCKS-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
1308 // BLOCKS-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
1309 // BLOCKS-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
1310 // BLOCKS-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
1311 // BLOCKS-NEXT:    [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
1312 // BLOCKS-NEXT:    [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
1313 // BLOCKS-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 5
1314 // BLOCKS-NEXT:    [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8
1315 // BLOCKS-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 6
1316 // BLOCKS-NEXT:    [[TMP15:%.*]] = load i64, i64* [[TMP14]], align 8
1317 // BLOCKS-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 7
1318 // BLOCKS-NEXT:    [[TMP17:%.*]] = load i64, i64* [[TMP16]], align 8
1319 // BLOCKS-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 8
1320 // BLOCKS-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 8
1321 // BLOCKS-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 9
1322 // BLOCKS-NEXT:    [[TMP21:%.*]] = load i8*, i8** [[TMP20]], align 8
1323 // BLOCKS-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
1324 // BLOCKS-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
1325 // BLOCKS-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]])
1326 // BLOCKS-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]])
1327 // BLOCKS-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]])
1328 // BLOCKS-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14
1329 // BLOCKS-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !14
1330 // BLOCKS-NEXT:    store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
1331 // BLOCKS-NEXT:    store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, double**, i32**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
1332 // BLOCKS-NEXT:    store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !14
1333 // BLOCKS-NEXT:    store i64 [[TMP13]], i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
1334 // BLOCKS-NEXT:    store i64 [[TMP15]], i64* [[DOTUB__ADDR_I]], align 8, !noalias !14
1335 // BLOCKS-NEXT:    store i64 [[TMP17]], i64* [[DOTST__ADDR_I]], align 8, !noalias !14
1336 // BLOCKS-NEXT:    store i32 [[TMP19]], i32* [[DOTLITER__ADDR_I]], align 4, !noalias !14
1337 // BLOCKS-NEXT:    store i8* [[TMP21]], i8** [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14
1338 // BLOCKS-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
1339 // BLOCKS-NEXT:    [[TMP22:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
1340 // BLOCKS-NEXT:    [[TMP23:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
1341 // BLOCKS-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
1342 // BLOCKS-NEXT:    [[TMP25:%.*]] = bitcast void (i8*, ...)* [[TMP23]] to void (i8*, double**, i32**)*
1343 // BLOCKS-NEXT:    call void [[TMP25]](i8* [[TMP24]], double** [[DOTFIRSTPRIV_PTR_ADDR_I]], i32** [[DOTFIRSTPRIV_PTR_ADDR1_I]]) #[[ATTR4:[0-9]+]]
1344 // BLOCKS-NEXT:    [[TMP26:%.*]] = load double*, double** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !14
1345 // BLOCKS-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !14
1346 // BLOCKS-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
1347 // BLOCKS-NEXT:    [[CONV_I:%.*]] = trunc i64 [[TMP28]] to i32
1348 // BLOCKS-NEXT:    store i32 [[CONV_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14
1349 // BLOCKS-NEXT:    br label [[OMP_INNER_FOR_COND_I:%.*]]
1350 // BLOCKS:       omp.inner.for.cond.i:
1351 // BLOCKS-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14
1352 // BLOCKS-NEXT:    [[CONV2_I:%.*]] = sext i32 [[TMP29]] to i64
1353 // BLOCKS-NEXT:    [[TMP30:%.*]] = load i64, i64* [[DOTUB__ADDR_I]], align 8, !noalias !14
1354 // BLOCKS-NEXT:    [[CMP_I:%.*]] = icmp ule i64 [[CONV2_I]], [[TMP30]]
1355 // BLOCKS-NEXT:    br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__EXIT:%.*]]
1356 // BLOCKS:       omp.inner.for.body.i:
1357 // BLOCKS-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14
1358 // BLOCKS-NEXT:    store i32 [[TMP31]], i32* [[I_I]], align 4, !noalias !14
1359 // BLOCKS-NEXT:    store double 1.000000e+00, double* [[TMP26]], align 8
1360 // BLOCKS-NEXT:    store i32 11, i32* [[TMP27]], align 4
1361 // BLOCKS-NEXT:    [[BLOCK_ISA_I:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK_I]], i32 0, i32 0
1362 // BLOCKS-NEXT:    store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA_I]], align 8, !noalias !14
1363 // BLOCKS-NEXT:    [[BLOCK_FLAGS_I:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK_I]], i32 0, i32 1
1364 // BLOCKS-NEXT:    store i32 1073741824, i32* [[BLOCK_FLAGS_I]], align 8, !noalias !14
1365 // BLOCKS-NEXT:    [[BLOCK_RESERVED_I:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK_I]], i32 0, i32 2
1366 // BLOCKS-NEXT:    store i32 0, i32* [[BLOCK_RESERVED_I]], align 4, !noalias !14
1367 // BLOCKS-NEXT:    [[BLOCK_INVOKE_I:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK_I]], i32 0, i32 3
1368 // BLOCKS-NEXT:    store i8* bitcast (void (i8*)* @_block_invoke to i8*), i8** [[BLOCK_INVOKE_I]], align 8, !noalias !14
1369 // BLOCKS-NEXT:    [[BLOCK_DESCRIPTOR_I:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK_I]], i32 0, i32 4
1370 // BLOCKS-NEXT:    store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp.1 to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR_I]], align 8, !noalias !14
1371 // BLOCKS-NEXT:    [[BLOCK_CAPTURED_I:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK_I]], i32 0, i32 5
1372 // BLOCKS-NEXT:    [[TMP32:%.*]] = load volatile double, double* [[TMP26]], align 8
1373 // BLOCKS-NEXT:    store volatile double [[TMP32]], double* [[BLOCK_CAPTURED_I]], align 8, !noalias !14
1374 // BLOCKS-NEXT:    [[BLOCK_CAPTURED3_I:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK_I]], i32 0, i32 6
1375 // BLOCKS-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP27]], align 4
1376 // BLOCKS-NEXT:    store i32 [[TMP33]], i32* [[BLOCK_CAPTURED3_I]], align 8, !noalias !14
1377 // BLOCKS-NEXT:    [[TMP34:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK_I]] to void ()*
1378 // BLOCKS-NEXT:    [[BLOCK_LITERAL_I:%.*]] = bitcast void ()* [[TMP34]] to %struct.__block_literal_generic*
1379 // BLOCKS-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL_I]], i32 0, i32 3
1380 // BLOCKS-NEXT:    [[TMP36:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL_I]] to i8*
1381 // BLOCKS-NEXT:    [[TMP37:%.*]] = load i8*, i8** [[TMP35]], align 8, !noalias !14
1382 // BLOCKS-NEXT:    [[TMP38:%.*]] = bitcast i8* [[TMP37]] to void (i8*)*
1383 // BLOCKS-NEXT:    call void [[TMP38]](i8* noundef [[TMP36]]) #[[ATTR4]]
1384 // BLOCKS-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14
1385 // BLOCKS-NEXT:    [[ADD4_I:%.*]] = add nsw i32 [[TMP39]], 1
1386 // BLOCKS-NEXT:    store i32 [[ADD4_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14
1387 // BLOCKS-NEXT:    br label [[OMP_INNER_FOR_COND_I]]
1388 // BLOCKS:       .omp_outlined..exit:
1389 // BLOCKS-NEXT:    ret i32 0
1390 //
1391 //
1392 // ARRAY-LABEL: define {{[^@]+}}@_Z10array_funciPfP2St
1393 // ARRAY-SAME: (i32 noundef [[N:%.*]], float* noundef [[A:%.*]], %struct.St* noundef [[S:%.*]]) #[[ATTR0:[0-9]+]] {
1394 // ARRAY-NEXT:  entry:
1395 // ARRAY-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1396 // ARRAY-NEXT:    [[A_ADDR:%.*]] = alloca float*, align 8
1397 // ARRAY-NEXT:    [[S_ADDR:%.*]] = alloca %struct.St*, align 8
1398 // ARRAY-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1399 // ARRAY-NEXT:    store float* [[A]], float** [[A_ADDR]], align 8
1400 // ARRAY-NEXT:    store %struct.St* [[S]], %struct.St** [[S_ADDR]], align 8
1401 // ARRAY-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
1402 // ARRAY-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
1403 // ARRAY-NEXT:    [[TMP2:%.*]] = load float*, float** [[A_ADDR]], align 8
1404 // ARRAY-NEXT:    [[TMP3:%.*]] = load %struct.St*, %struct.St** [[S_ADDR]], align 8
1405 // ARRAY-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, float*, %struct.St*)* @_Z10array_funciPfP2St.omp_outlined to void (i32*, i32*, ...)*), i64 [[TMP1]], float* [[TMP2]], %struct.St* [[TMP3]])
1406 // ARRAY-NEXT:    ret void
1407 //
1408 //
1409 // ARRAY-LABEL: define {{[^@]+}}@_Z10array_funciPfP2St.omp_outlined
1410 // ARRAY-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]], float* noundef [[A:%.*]], %struct.St* noundef [[S:%.*]]) #[[ATTR1:[0-9]+]] {
1411 // ARRAY-NEXT:  entry:
1412 // ARRAY-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1413 // ARRAY-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1414 // ARRAY-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
1415 // ARRAY-NEXT:    [[A_ADDR:%.*]] = alloca float*, align 8
1416 // ARRAY-NEXT:    [[S_ADDR:%.*]] = alloca %struct.St*, align 8
1417 // ARRAY-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
1418 // ARRAY-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1419 // ARRAY-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1420 // ARRAY-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1421 // ARRAY-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
1422 // ARRAY-NEXT:    store float* [[A]], float** [[A_ADDR]], align 8
1423 // ARRAY-NEXT:    store %struct.St* [[S]], %struct.St** [[S_ADDR]], align 8
1424 // ARRAY-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
1425 // ARRAY-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1426 // ARRAY-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
1427 // ARRAY-NEXT:    [[TMP3:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1428 // ARRAY-NEXT:    [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
1429 // ARRAY-NEXT:    br i1 [[TMP4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1430 // ARRAY:       omp_if.then:
1431 // ARRAY-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
1432 // ARRAY-NEXT:    store i64 [[TMP0]], i64* [[TMP5]], align 8
1433 // ARRAY-NEXT:    call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1434 // ARRAY-NEXT:    [[TMP6:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1, i64 96, i64 8, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
1435 // ARRAY-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to %struct.kmp_task_t_with_privates*
1436 // ARRAY-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP7]], i32 0, i32 0
1437 // ARRAY-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP8]], i32 0, i32 0
1438 // ARRAY-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
1439 // ARRAY-NEXT:    [[TMP11:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
1440 // ARRAY-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP10]], i8* align 8 [[TMP11]], i64 8, i1 false)
1441 // ARRAY-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP7]], i32 0, i32 1
1442 // ARRAY-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP12]], i32 0, i32 0
1443 // ARRAY-NEXT:    [[TMP14:%.*]] = load float*, float** [[A_ADDR]], align 8
1444 // ARRAY-NEXT:    store float* [[TMP14]], float** [[TMP13]], align 8
1445 // ARRAY-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP12]], i32 0, i32 1
1446 // ARRAY-NEXT:    [[TMP16:%.*]] = load %struct.St*, %struct.St** [[S_ADDR]], align 8
1447 // ARRAY-NEXT:    store %struct.St* [[TMP16]], %struct.St** [[TMP15]], align 8
1448 // ARRAY-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP8]], i32 0, i32 5
1449 // ARRAY-NEXT:    store i64 0, i64* [[TMP17]], align 8
1450 // ARRAY-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP8]], i32 0, i32 6
1451 // ARRAY-NEXT:    store i64 9, i64* [[TMP18]], align 8
1452 // ARRAY-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP8]], i32 0, i32 7
1453 // ARRAY-NEXT:    store i64 1, i64* [[TMP19]], align 8
1454 // ARRAY-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP8]], i32 0, i32 9
1455 // ARRAY-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i8*
1456 // ARRAY-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP21]], i8 0, i64 8, i1 false)
1457 // ARRAY-NEXT:    [[TMP22:%.*]] = load i64, i64* [[TMP19]], align 8
1458 // ARRAY-NEXT:    call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i8* [[TMP6]], i32 1, i64* [[TMP17]], i64* [[TMP18]], i64 [[TMP22]], i32 1, i32 0, i64 0, i8* null)
1459 // ARRAY-NEXT:    call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1460 // ARRAY-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1461 // ARRAY-NEXT:    br label [[OMP_IF_END]]
1462 // ARRAY:       omp_if.end:
1463 // ARRAY-NEXT:    ret void
1464 //
1465 //
1466 // ARRAY-LABEL: define {{[^@]+}}@.omp_task_privates_map.
1467 // ARRAY-SAME: (%struct..kmp_privates.t* noalias noundef [[TMP0:%.*]], float*** noalias noundef [[TMP1:%.*]], %struct.St*** noalias noundef [[TMP2:%.*]]) #[[ATTR4:[0-9]+]] {
1468 // ARRAY-NEXT:  entry:
1469 // ARRAY-NEXT:    [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8
1470 // ARRAY-NEXT:    [[DOTADDR1:%.*]] = alloca float***, align 8
1471 // ARRAY-NEXT:    [[DOTADDR2:%.*]] = alloca %struct.St***, align 8
1472 // ARRAY-NEXT:    store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8
1473 // ARRAY-NEXT:    store float*** [[TMP1]], float**** [[DOTADDR1]], align 8
1474 // ARRAY-NEXT:    store %struct.St*** [[TMP2]], %struct.St**** [[DOTADDR2]], align 8
1475 // ARRAY-NEXT:    [[TMP3:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8
1476 // ARRAY-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 0
1477 // ARRAY-NEXT:    [[TMP5:%.*]] = load float***, float**** [[DOTADDR1]], align 8
1478 // ARRAY-NEXT:    store float** [[TMP4]], float*** [[TMP5]], align 8
1479 // ARRAY-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 1
1480 // ARRAY-NEXT:    [[TMP7:%.*]] = load %struct.St***, %struct.St**** [[DOTADDR2]], align 8
1481 // ARRAY-NEXT:    store %struct.St** [[TMP6]], %struct.St*** [[TMP7]], align 8
1482 // ARRAY-NEXT:    ret void
1483 //
1484 //
1485 // ARRAY-LABEL: define {{[^@]+}}@.omp_task_entry.
1486 // ARRAY-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
1487 // ARRAY-NEXT:  entry:
1488 // ARRAY-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
1489 // ARRAY-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
1490 // ARRAY-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
1491 // ARRAY-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
1492 // ARRAY-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
1493 // ARRAY-NEXT:    [[DOTLB__ADDR_I:%.*]] = alloca i64, align 8
1494 // ARRAY-NEXT:    [[DOTUB__ADDR_I:%.*]] = alloca i64, align 8
1495 // ARRAY-NEXT:    [[DOTST__ADDR_I:%.*]] = alloca i64, align 8
1496 // ARRAY-NEXT:    [[DOTLITER__ADDR_I:%.*]] = alloca i32, align 4
1497 // ARRAY-NEXT:    [[DOTREDUCTIONS__ADDR_I:%.*]] = alloca i8*, align 8
1498 // ARRAY-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
1499 // ARRAY-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca float**, align 8
1500 // ARRAY-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca %struct.St**, align 8
1501 // ARRAY-NEXT:    [[I_I:%.*]] = alloca i32, align 4
1502 // ARRAY-NEXT:    [[DOTOMP_IV_I:%.*]] = alloca i32, align 4
1503 // ARRAY-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
1504 // ARRAY-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
1505 // ARRAY-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
1506 // ARRAY-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
1507 // ARRAY-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
1508 // ARRAY-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
1509 // ARRAY-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
1510 // ARRAY-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
1511 // ARRAY-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
1512 // ARRAY-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
1513 // ARRAY-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
1514 // ARRAY-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
1515 // ARRAY-NEXT:    [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
1516 // ARRAY-NEXT:    [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
1517 // ARRAY-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 5
1518 // ARRAY-NEXT:    [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8
1519 // ARRAY-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 6
1520 // ARRAY-NEXT:    [[TMP15:%.*]] = load i64, i64* [[TMP14]], align 8
1521 // ARRAY-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 7
1522 // ARRAY-NEXT:    [[TMP17:%.*]] = load i64, i64* [[TMP16]], align 8
1523 // ARRAY-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 8
1524 // ARRAY-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 8
1525 // ARRAY-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 9
1526 // ARRAY-NEXT:    [[TMP21:%.*]] = load i8*, i8** [[TMP20]], align 8
1527 // ARRAY-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
1528 // ARRAY-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
1529 // ARRAY-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]])
1530 // ARRAY-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]])
1531 // ARRAY-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]])
1532 // ARRAY-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14
1533 // ARRAY-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !14
1534 // ARRAY-NEXT:    store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
1535 // ARRAY-NEXT:    store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, float***, %struct.St***)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
1536 // ARRAY-NEXT:    store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !14
1537 // ARRAY-NEXT:    store i64 [[TMP13]], i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
1538 // ARRAY-NEXT:    store i64 [[TMP15]], i64* [[DOTUB__ADDR_I]], align 8, !noalias !14
1539 // ARRAY-NEXT:    store i64 [[TMP17]], i64* [[DOTST__ADDR_I]], align 8, !noalias !14
1540 // ARRAY-NEXT:    store i32 [[TMP19]], i32* [[DOTLITER__ADDR_I]], align 4, !noalias !14
1541 // ARRAY-NEXT:    store i8* [[TMP21]], i8** [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14
1542 // ARRAY-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
1543 // ARRAY-NEXT:    [[TMP22:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
1544 // ARRAY-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP22]], i32 0, i32 0
1545 // ARRAY-NEXT:    [[TMP24:%.*]] = load i64, i64* [[TMP23]], align 8
1546 // ARRAY-NEXT:    [[TMP25:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
1547 // ARRAY-NEXT:    [[TMP26:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
1548 // ARRAY-NEXT:    [[TMP27:%.*]] = bitcast void (i8*, ...)* [[TMP25]] to void (i8*, float***, %struct.St***)*
1549 // ARRAY-NEXT:    call void [[TMP27]](i8* [[TMP26]], float*** [[DOTFIRSTPRIV_PTR_ADDR_I]], %struct.St*** [[DOTFIRSTPRIV_PTR_ADDR1_I]]) #[[ATTR2:[0-9]+]]
1550 // ARRAY-NEXT:    [[TMP28:%.*]] = load float**, float*** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !14
1551 // ARRAY-NEXT:    [[TMP29:%.*]] = load %struct.St**, %struct.St*** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !14
1552 // ARRAY-NEXT:    [[TMP30:%.*]] = load i64, i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
1553 // ARRAY-NEXT:    [[CONV_I:%.*]] = trunc i64 [[TMP30]] to i32
1554 // ARRAY-NEXT:    store i32 [[CONV_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14
1555 // ARRAY-NEXT:    br label [[OMP_INNER_FOR_COND_I:%.*]]
1556 // ARRAY:       omp.inner.for.cond.i:
1557 // ARRAY-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14
1558 // ARRAY-NEXT:    [[CONV2_I:%.*]] = sext i32 [[TMP31]] to i64
1559 // ARRAY-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTUB__ADDR_I]], align 8, !noalias !14
1560 // ARRAY-NEXT:    [[CMP_I:%.*]] = icmp ule i64 [[CONV2_I]], [[TMP32]]
1561 // ARRAY-NEXT:    br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__EXIT:%.*]]
1562 // ARRAY:       omp.inner.for.body.i:
1563 // ARRAY-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14
1564 // ARRAY-NEXT:    store i32 [[TMP33]], i32* [[I_I]], align 4, !noalias !14
1565 // ARRAY-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14
1566 // ARRAY-NEXT:    [[ADD3_I:%.*]] = add nsw i32 [[TMP34]], 1
1567 // ARRAY-NEXT:    store i32 [[ADD3_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14
1568 // ARRAY-NEXT:    br label [[OMP_INNER_FOR_COND_I]]
1569 // ARRAY:       .omp_outlined..exit:
1570 // ARRAY-NEXT:    ret i32 0
1571 //
1572 //
1573 // SIMD-ONLY0-LABEL: define {{[^@]+}}@main
1574 // SIMD-ONLY0-SAME: () #[[ATTR0:[0-9]+]] {
1575 // SIMD-ONLY0-NEXT:  entry:
1576 // SIMD-ONLY0-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1577 // SIMD-ONLY0-NEXT:    [[TTT:%.*]] = alloca [[STRUCT_S:%.*]], align 8
1578 // SIMD-ONLY0-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S]], align 8
1579 // SIMD-ONLY0-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
1580 // SIMD-ONLY0-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
1581 // SIMD-ONLY0-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S], align 16
1582 // SIMD-ONLY0-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S]], align 8
1583 // SIMD-ONLY0-NEXT:    [[I:%.*]] = alloca i32, align 4
1584 // SIMD-ONLY0-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1585 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdEC1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TTT]])
1586 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdEC1ERKS0_d(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TEST]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[TTT]], double noundef 0.000000e+00)
1587 // SIMD-ONLY0-NEXT:    store i32 0, i32* [[T_VAR]], align 4
1588 // SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
1589 // SIMD-ONLY0-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i64 8, i1 false)
1590 // SIMD-ONLY0-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
1591 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdEC1Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[ARRAYINIT_BEGIN]], double noundef 1.000000e+00)
1592 // SIMD-ONLY0-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
1593 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdEC1Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[ARRAYINIT_ELEMENT]], double noundef 2.000000e+00)
1594 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdEC1Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[VAR]], double noundef 3.000000e+00)
1595 // SIMD-ONLY0-NEXT:    store i32 0, i32* [[I]], align 4
1596 // SIMD-ONLY0-NEXT:    br label [[FOR_COND:%.*]]
1597 // SIMD-ONLY0:       for.cond:
1598 // SIMD-ONLY0-NEXT:    [[TMP1:%.*]] = load i32, i32* [[I]], align 4
1599 // SIMD-ONLY0-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP1]], 10
1600 // SIMD-ONLY0-NEXT:    br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
1601 // SIMD-ONLY0:       for.body:
1602 // SIMD-ONLY0-NEXT:    [[TMP2:%.*]] = load i32, i32* [[T_VAR]], align 4
1603 // SIMD-ONLY0-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 0
1604 // SIMD-ONLY0-NEXT:    store i32 [[TMP2]], i32* [[ARRAYIDX]], align 4
1605 // SIMD-ONLY0-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
1606 // SIMD-ONLY0-NEXT:    [[TMP3:%.*]] = bitcast %struct.S* [[ARRAYIDX1]] to i8*
1607 // SIMD-ONLY0-NEXT:    [[TMP4:%.*]] = bitcast %struct.S* [[VAR]] to i8*
1608 // SIMD-ONLY0-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP3]], i8* align 8 [[TMP4]], i64 8, i1 false)
1609 // SIMD-ONLY0-NEXT:    store i32 33, i32* @_ZZ4mainE5sivar, align 4
1610 // SIMD-ONLY0-NEXT:    br label [[FOR_INC:%.*]]
1611 // SIMD-ONLY0:       for.inc:
1612 // SIMD-ONLY0-NEXT:    [[TMP5:%.*]] = load i32, i32* [[I]], align 4
1613 // SIMD-ONLY0-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP5]], 1
1614 // SIMD-ONLY0-NEXT:    store i32 [[INC]], i32* [[I]], align 4
1615 // SIMD-ONLY0-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP2:![0-9]+]]
1616 // SIMD-ONLY0:       for.end:
1617 // SIMD-ONLY0-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
1618 // SIMD-ONLY0-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
1619 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[VAR]]) #[[ATTR4:[0-9]+]]
1620 // SIMD-ONLY0-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
1621 // SIMD-ONLY0-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
1622 // SIMD-ONLY0-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1623 // SIMD-ONLY0:       arraydestroy.body:
1624 // SIMD-ONLY0-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP6]], [[FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1625 // SIMD-ONLY0-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1626 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
1627 // SIMD-ONLY0-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
1628 // SIMD-ONLY0-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
1629 // SIMD-ONLY0:       arraydestroy.done2:
1630 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TEST]]) #[[ATTR4]]
1631 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TTT]]) #[[ATTR4]]
1632 // SIMD-ONLY0-NEXT:    [[TMP7:%.*]] = load i32, i32* [[RETVAL]], align 4
1633 // SIMD-ONLY0-NEXT:    ret i32 [[TMP7]]
1634 //
1635 //
1636 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIdEC1Ev
1637 // SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
1638 // SIMD-ONLY0-NEXT:  entry:
1639 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1640 // SIMD-ONLY0-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1641 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1642 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdEC2Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]])
1643 // SIMD-ONLY0-NEXT:    ret void
1644 //
1645 //
1646 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIdEC1ERKS0_d
1647 // SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[S:%.*]], double noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1648 // SIMD-ONLY0-NEXT:  entry:
1649 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1650 // SIMD-ONLY0-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S*, align 8
1651 // SIMD-ONLY0-NEXT:    [[T_ADDR:%.*]] = alloca double, align 8
1652 // SIMD-ONLY0-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1653 // SIMD-ONLY0-NEXT:    store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8
1654 // SIMD-ONLY0-NEXT:    store double [[T]], double* [[T_ADDR]], align 8
1655 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1656 // SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8
1657 // SIMD-ONLY0-NEXT:    [[TMP1:%.*]] = load double, double* [[T_ADDR]], align 8
1658 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdEC2ERKS0_d(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[TMP0]], double noundef [[TMP1]])
1659 // SIMD-ONLY0-NEXT:    ret void
1660 //
1661 //
1662 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIdEC1Ed
1663 // SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], double noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1664 // SIMD-ONLY0-NEXT:  entry:
1665 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1666 // SIMD-ONLY0-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
1667 // SIMD-ONLY0-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1668 // SIMD-ONLY0-NEXT:    store double [[A]], double* [[A_ADDR]], align 8
1669 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1670 // SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = load double, double* [[A_ADDR]], align 8
1671 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdEC2Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]], double noundef [[TMP0]])
1672 // SIMD-ONLY0-NEXT:    ret void
1673 //
1674 //
1675 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
1676 // SIMD-ONLY0-SAME: () #[[ATTR3:[0-9]+]] {
1677 // SIMD-ONLY0-NEXT:  entry:
1678 // SIMD-ONLY0-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1679 // SIMD-ONLY0-NEXT:    [[TTT:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
1680 // SIMD-ONLY0-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0]], align 4
1681 // SIMD-ONLY0-NEXT:    [[T_VAR:%.*]] = alloca i32, align 128
1682 // SIMD-ONLY0-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
1683 // SIMD-ONLY0-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
1684 // SIMD-ONLY0-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S_0]], align 4
1685 // SIMD-ONLY0-NEXT:    [[I:%.*]] = alloca i32, align 4
1686 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TTT]])
1687 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiEC1ERKS0_i(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TTT]], i32 noundef 0)
1688 // SIMD-ONLY0-NEXT:    store i32 0, i32* [[T_VAR]], align 128
1689 // SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
1690 // SIMD-ONLY0-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
1691 // SIMD-ONLY0-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
1692 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 noundef 1)
1693 // SIMD-ONLY0-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
1694 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
1695 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]], i32 noundef 3)
1696 // SIMD-ONLY0-NEXT:    store i32 0, i32* [[I]], align 4
1697 // SIMD-ONLY0-NEXT:    br label [[FOR_COND:%.*]]
1698 // SIMD-ONLY0:       for.cond:
1699 // SIMD-ONLY0-NEXT:    [[TMP1:%.*]] = load i32, i32* [[I]], align 4
1700 // SIMD-ONLY0-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP1]], 10
1701 // SIMD-ONLY0-NEXT:    br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
1702 // SIMD-ONLY0:       for.body:
1703 // SIMD-ONLY0-NEXT:    [[TMP2:%.*]] = load i32, i32* [[T_VAR]], align 128
1704 // SIMD-ONLY0-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 0
1705 // SIMD-ONLY0-NEXT:    store i32 [[TMP2]], i32* [[ARRAYIDX]], align 4
1706 // SIMD-ONLY0-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
1707 // SIMD-ONLY0-NEXT:    [[TMP3:%.*]] = bitcast %struct.S.0* [[ARRAYIDX1]] to i8*
1708 // SIMD-ONLY0-NEXT:    [[TMP4:%.*]] = bitcast %struct.S.0* [[VAR]] to i8*
1709 // SIMD-ONLY0-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP3]], i8* align 4 [[TMP4]], i64 4, i1 false)
1710 // SIMD-ONLY0-NEXT:    br label [[FOR_INC:%.*]]
1711 // SIMD-ONLY0:       for.inc:
1712 // SIMD-ONLY0-NEXT:    [[TMP5:%.*]] = load i32, i32* [[I]], align 4
1713 // SIMD-ONLY0-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP5]], 1
1714 // SIMD-ONLY0-NEXT:    store i32 [[INC]], i32* [[I]], align 4
1715 // SIMD-ONLY0-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
1716 // SIMD-ONLY0:       for.end:
1717 // SIMD-ONLY0-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1718 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
1719 // SIMD-ONLY0-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
1720 // SIMD-ONLY0-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
1721 // SIMD-ONLY0-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1722 // SIMD-ONLY0:       arraydestroy.body:
1723 // SIMD-ONLY0-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP6]], [[FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1724 // SIMD-ONLY0-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1725 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
1726 // SIMD-ONLY0-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
1727 // SIMD-ONLY0-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
1728 // SIMD-ONLY0:       arraydestroy.done2:
1729 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
1730 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TTT]]) #[[ATTR4]]
1731 // SIMD-ONLY0-NEXT:    [[TMP7:%.*]] = load i32, i32* [[RETVAL]], align 4
1732 // SIMD-ONLY0-NEXT:    ret i32 [[TMP7]]
1733 //
1734 //
1735 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIdED1Ev
1736 // SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1737 // SIMD-ONLY0-NEXT:  entry:
1738 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1739 // SIMD-ONLY0-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1740 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1741 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdED2Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]]) #[[ATTR4]]
1742 // SIMD-ONLY0-NEXT:    ret void
1743 //
1744 //
1745 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIdEC2Ev
1746 // SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1747 // SIMD-ONLY0-NEXT:  entry:
1748 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1749 // SIMD-ONLY0-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1750 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1751 // SIMD-ONLY0-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1752 // SIMD-ONLY0-NEXT:    store double 0.000000e+00, double* [[F]], align 8
1753 // SIMD-ONLY0-NEXT:    ret void
1754 //
1755 //
1756 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIdED2Ev
1757 // SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1758 // SIMD-ONLY0-NEXT:  entry:
1759 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1760 // SIMD-ONLY0-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1761 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1762 // SIMD-ONLY0-NEXT:    ret void
1763 //
1764 //
1765 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIdEC2ERKS0_d
1766 // SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[S:%.*]], double noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1767 // SIMD-ONLY0-NEXT:  entry:
1768 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1769 // SIMD-ONLY0-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S*, align 8
1770 // SIMD-ONLY0-NEXT:    [[T_ADDR:%.*]] = alloca double, align 8
1771 // SIMD-ONLY0-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1772 // SIMD-ONLY0-NEXT:    store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8
1773 // SIMD-ONLY0-NEXT:    store double [[T]], double* [[T_ADDR]], align 8
1774 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1775 // SIMD-ONLY0-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1776 // SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8
1777 // SIMD-ONLY0-NEXT:    [[F2:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 0
1778 // SIMD-ONLY0-NEXT:    [[TMP1:%.*]] = load double, double* [[F2]], align 8
1779 // SIMD-ONLY0-NEXT:    [[TMP2:%.*]] = load double, double* [[T_ADDR]], align 8
1780 // SIMD-ONLY0-NEXT:    [[ADD:%.*]] = fadd double [[TMP1]], [[TMP2]]
1781 // SIMD-ONLY0-NEXT:    store double [[ADD]], double* [[F]], align 8
1782 // SIMD-ONLY0-NEXT:    ret void
1783 //
1784 //
1785 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIdEC2Ed
1786 // SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], double noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1787 // SIMD-ONLY0-NEXT:  entry:
1788 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1789 // SIMD-ONLY0-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
1790 // SIMD-ONLY0-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1791 // SIMD-ONLY0-NEXT:    store double [[A]], double* [[A_ADDR]], align 8
1792 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1793 // SIMD-ONLY0-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1794 // SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = load double, double* [[A_ADDR]], align 8
1795 // SIMD-ONLY0-NEXT:    store double [[TMP0]], double* [[F]], align 8
1796 // SIMD-ONLY0-NEXT:    ret void
1797 //
1798 //
1799 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
1800 // SIMD-ONLY0-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1801 // SIMD-ONLY0-NEXT:  entry:
1802 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1803 // SIMD-ONLY0-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1804 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1805 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
1806 // SIMD-ONLY0-NEXT:    ret void
1807 //
1808 //
1809 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIiEC1ERKS0_i
1810 // SIMD-ONLY0-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], i32 noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1811 // SIMD-ONLY0-NEXT:  entry:
1812 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1813 // SIMD-ONLY0-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8
1814 // SIMD-ONLY0-NEXT:    [[T_ADDR:%.*]] = alloca i32, align 4
1815 // SIMD-ONLY0-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1816 // SIMD-ONLY0-NEXT:    store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8
1817 // SIMD-ONLY0-NEXT:    store i32 [[T]], i32* [[T_ADDR]], align 4
1818 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1819 // SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8
1820 // SIMD-ONLY0-NEXT:    [[TMP1:%.*]] = load i32, i32* [[T_ADDR]], align 4
1821 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiEC2ERKS0_i(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP0]], i32 noundef [[TMP1]])
1822 // SIMD-ONLY0-NEXT:    ret void
1823 //
1824 //
1825 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
1826 // SIMD-ONLY0-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1827 // SIMD-ONLY0-NEXT:  entry:
1828 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1829 // SIMD-ONLY0-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1830 // SIMD-ONLY0-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1831 // SIMD-ONLY0-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1832 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1833 // SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
1834 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]])
1835 // SIMD-ONLY0-NEXT:    ret void
1836 //
1837 //
1838 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
1839 // SIMD-ONLY0-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1840 // SIMD-ONLY0-NEXT:  entry:
1841 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1842 // SIMD-ONLY0-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1843 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1844 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
1845 // SIMD-ONLY0-NEXT:    ret void
1846 //
1847 //
1848 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
1849 // SIMD-ONLY0-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1850 // SIMD-ONLY0-NEXT:  entry:
1851 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1852 // SIMD-ONLY0-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1853 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1854 // SIMD-ONLY0-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1855 // SIMD-ONLY0-NEXT:    store i32 0, i32* [[F]], align 4
1856 // SIMD-ONLY0-NEXT:    ret void
1857 //
1858 //
1859 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIiEC2ERKS0_i
1860 // SIMD-ONLY0-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], i32 noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1861 // SIMD-ONLY0-NEXT:  entry:
1862 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1863 // SIMD-ONLY0-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8
1864 // SIMD-ONLY0-NEXT:    [[T_ADDR:%.*]] = alloca i32, align 4
1865 // SIMD-ONLY0-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1866 // SIMD-ONLY0-NEXT:    store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8
1867 // SIMD-ONLY0-NEXT:    store i32 [[T]], i32* [[T_ADDR]], align 4
1868 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1869 // SIMD-ONLY0-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1870 // SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8
1871 // SIMD-ONLY0-NEXT:    [[F2:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[TMP0]], i32 0, i32 0
1872 // SIMD-ONLY0-NEXT:    [[TMP1:%.*]] = load i32, i32* [[F2]], align 4
1873 // SIMD-ONLY0-NEXT:    [[TMP2:%.*]] = load i32, i32* [[T_ADDR]], align 4
1874 // SIMD-ONLY0-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP2]]
1875 // SIMD-ONLY0-NEXT:    store i32 [[ADD]], i32* [[F]], align 4
1876 // SIMD-ONLY0-NEXT:    ret void
1877 //
1878 //
1879 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
1880 // SIMD-ONLY0-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1881 // SIMD-ONLY0-NEXT:  entry:
1882 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1883 // SIMD-ONLY0-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1884 // SIMD-ONLY0-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1885 // SIMD-ONLY0-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1886 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1887 // SIMD-ONLY0-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1888 // SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
1889 // SIMD-ONLY0-NEXT:    store i32 [[TMP0]], i32* [[F]], align 4
1890 // SIMD-ONLY0-NEXT:    ret void
1891 //
1892 //
1893 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
1894 // SIMD-ONLY0-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1895 // SIMD-ONLY0-NEXT:  entry:
1896 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1897 // SIMD-ONLY0-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1898 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1899 // SIMD-ONLY0-NEXT:    ret void
1900 //
1901 //
1902 // SIMD-ONLY1-LABEL: define {{[^@]+}}@main
1903 // SIMD-ONLY1-SAME: () #[[ATTR0:[0-9]+]] {
1904 // SIMD-ONLY1-NEXT:  entry:
1905 // SIMD-ONLY1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1906 // SIMD-ONLY1-NEXT:    [[TTT:%.*]] = alloca [[STRUCT_S:%.*]], align 8
1907 // SIMD-ONLY1-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S]], align 8
1908 // SIMD-ONLY1-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
1909 // SIMD-ONLY1-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
1910 // SIMD-ONLY1-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S], align 16
1911 // SIMD-ONLY1-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S]], align 8
1912 // SIMD-ONLY1-NEXT:    [[I:%.*]] = alloca i32, align 4
1913 // SIMD-ONLY1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1914 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdEC1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TTT]])
1915 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdEC1ERKS0_d(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TEST]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[TTT]], double noundef 0.000000e+00)
1916 // SIMD-ONLY1-NEXT:    store i32 0, i32* [[T_VAR]], align 4
1917 // SIMD-ONLY1-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
1918 // SIMD-ONLY1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i64 8, i1 false)
1919 // SIMD-ONLY1-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
1920 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdEC1Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[ARRAYINIT_BEGIN]], double noundef 1.000000e+00)
1921 // SIMD-ONLY1-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
1922 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdEC1Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[ARRAYINIT_ELEMENT]], double noundef 2.000000e+00)
1923 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdEC1Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[VAR]], double noundef 3.000000e+00)
1924 // SIMD-ONLY1-NEXT:    store i32 0, i32* [[I]], align 4
1925 // SIMD-ONLY1-NEXT:    br label [[FOR_COND:%.*]]
1926 // SIMD-ONLY1:       for.cond:
1927 // SIMD-ONLY1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[I]], align 4
1928 // SIMD-ONLY1-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP1]], 10
1929 // SIMD-ONLY1-NEXT:    br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
1930 // SIMD-ONLY1:       for.body:
1931 // SIMD-ONLY1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[T_VAR]], align 4
1932 // SIMD-ONLY1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 0
1933 // SIMD-ONLY1-NEXT:    store i32 [[TMP2]], i32* [[ARRAYIDX]], align 4
1934 // SIMD-ONLY1-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
1935 // SIMD-ONLY1-NEXT:    [[TMP3:%.*]] = bitcast %struct.S* [[ARRAYIDX1]] to i8*
1936 // SIMD-ONLY1-NEXT:    [[TMP4:%.*]] = bitcast %struct.S* [[VAR]] to i8*
1937 // SIMD-ONLY1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP3]], i8* align 8 [[TMP4]], i64 8, i1 false)
1938 // SIMD-ONLY1-NEXT:    store i32 33, i32* @_ZZ4mainE5sivar, align 4
1939 // SIMD-ONLY1-NEXT:    br label [[FOR_INC:%.*]]
1940 // SIMD-ONLY1:       for.inc:
1941 // SIMD-ONLY1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[I]], align 4
1942 // SIMD-ONLY1-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP5]], 1
1943 // SIMD-ONLY1-NEXT:    store i32 [[INC]], i32* [[I]], align 4
1944 // SIMD-ONLY1-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP2:![0-9]+]]
1945 // SIMD-ONLY1:       for.end:
1946 // SIMD-ONLY1-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
1947 // SIMD-ONLY1-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
1948 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[VAR]]) #[[ATTR4:[0-9]+]]
1949 // SIMD-ONLY1-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
1950 // SIMD-ONLY1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
1951 // SIMD-ONLY1-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1952 // SIMD-ONLY1:       arraydestroy.body:
1953 // SIMD-ONLY1-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP6]], [[FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1954 // SIMD-ONLY1-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1955 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
1956 // SIMD-ONLY1-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
1957 // SIMD-ONLY1-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
1958 // SIMD-ONLY1:       arraydestroy.done2:
1959 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TEST]]) #[[ATTR4]]
1960 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TTT]]) #[[ATTR4]]
1961 // SIMD-ONLY1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[RETVAL]], align 4
1962 // SIMD-ONLY1-NEXT:    ret i32 [[TMP7]]
1963 //
1964 //
1965 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIdEC1Ev
1966 // SIMD-ONLY1-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
1967 // SIMD-ONLY1-NEXT:  entry:
1968 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1969 // SIMD-ONLY1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1970 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1971 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdEC2Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]])
1972 // SIMD-ONLY1-NEXT:    ret void
1973 //
1974 //
1975 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIdEC1ERKS0_d
1976 // SIMD-ONLY1-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[S:%.*]], double noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1977 // SIMD-ONLY1-NEXT:  entry:
1978 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1979 // SIMD-ONLY1-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S*, align 8
1980 // SIMD-ONLY1-NEXT:    [[T_ADDR:%.*]] = alloca double, align 8
1981 // SIMD-ONLY1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1982 // SIMD-ONLY1-NEXT:    store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8
1983 // SIMD-ONLY1-NEXT:    store double [[T]], double* [[T_ADDR]], align 8
1984 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1985 // SIMD-ONLY1-NEXT:    [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8
1986 // SIMD-ONLY1-NEXT:    [[TMP1:%.*]] = load double, double* [[T_ADDR]], align 8
1987 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdEC2ERKS0_d(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[TMP0]], double noundef [[TMP1]])
1988 // SIMD-ONLY1-NEXT:    ret void
1989 //
1990 //
1991 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIdEC1Ed
1992 // SIMD-ONLY1-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], double noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1993 // SIMD-ONLY1-NEXT:  entry:
1994 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1995 // SIMD-ONLY1-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
1996 // SIMD-ONLY1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1997 // SIMD-ONLY1-NEXT:    store double [[A]], double* [[A_ADDR]], align 8
1998 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1999 // SIMD-ONLY1-NEXT:    [[TMP0:%.*]] = load double, double* [[A_ADDR]], align 8
2000 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdEC2Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]], double noundef [[TMP0]])
2001 // SIMD-ONLY1-NEXT:    ret void
2002 //
2003 //
2004 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
2005 // SIMD-ONLY1-SAME: () #[[ATTR3:[0-9]+]] {
2006 // SIMD-ONLY1-NEXT:  entry:
2007 // SIMD-ONLY1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
2008 // SIMD-ONLY1-NEXT:    [[TTT:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
2009 // SIMD-ONLY1-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0]], align 4
2010 // SIMD-ONLY1-NEXT:    [[T_VAR:%.*]] = alloca i32, align 128
2011 // SIMD-ONLY1-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
2012 // SIMD-ONLY1-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
2013 // SIMD-ONLY1-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S_0]], align 4
2014 // SIMD-ONLY1-NEXT:    [[I:%.*]] = alloca i32, align 4
2015 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TTT]])
2016 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiEC1ERKS0_i(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TTT]], i32 noundef 0)
2017 // SIMD-ONLY1-NEXT:    store i32 0, i32* [[T_VAR]], align 128
2018 // SIMD-ONLY1-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
2019 // SIMD-ONLY1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
2020 // SIMD-ONLY1-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
2021 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 noundef 1)
2022 // SIMD-ONLY1-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
2023 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
2024 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]], i32 noundef 3)
2025 // SIMD-ONLY1-NEXT:    store i32 0, i32* [[I]], align 4
2026 // SIMD-ONLY1-NEXT:    br label [[FOR_COND:%.*]]
2027 // SIMD-ONLY1:       for.cond:
2028 // SIMD-ONLY1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[I]], align 4
2029 // SIMD-ONLY1-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP1]], 10
2030 // SIMD-ONLY1-NEXT:    br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
2031 // SIMD-ONLY1:       for.body:
2032 // SIMD-ONLY1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[T_VAR]], align 128
2033 // SIMD-ONLY1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 0
2034 // SIMD-ONLY1-NEXT:    store i32 [[TMP2]], i32* [[ARRAYIDX]], align 4
2035 // SIMD-ONLY1-NEXT:    [[ARRAYIDX1:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
2036 // SIMD-ONLY1-NEXT:    [[TMP3:%.*]] = bitcast %struct.S.0* [[ARRAYIDX1]] to i8*
2037 // SIMD-ONLY1-NEXT:    [[TMP4:%.*]] = bitcast %struct.S.0* [[VAR]] to i8*
2038 // SIMD-ONLY1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP3]], i8* align 4 [[TMP4]], i64 4, i1 false)
2039 // SIMD-ONLY1-NEXT:    br label [[FOR_INC:%.*]]
2040 // SIMD-ONLY1:       for.inc:
2041 // SIMD-ONLY1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[I]], align 4
2042 // SIMD-ONLY1-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP5]], 1
2043 // SIMD-ONLY1-NEXT:    store i32 [[INC]], i32* [[I]], align 4
2044 // SIMD-ONLY1-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP4:![0-9]+]]
2045 // SIMD-ONLY1:       for.end:
2046 // SIMD-ONLY1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
2047 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
2048 // SIMD-ONLY1-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
2049 // SIMD-ONLY1-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
2050 // SIMD-ONLY1-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
2051 // SIMD-ONLY1:       arraydestroy.body:
2052 // SIMD-ONLY1-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP6]], [[FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
2053 // SIMD-ONLY1-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
2054 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
2055 // SIMD-ONLY1-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
2056 // SIMD-ONLY1-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
2057 // SIMD-ONLY1:       arraydestroy.done2:
2058 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
2059 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TTT]]) #[[ATTR4]]
2060 // SIMD-ONLY1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[RETVAL]], align 4
2061 // SIMD-ONLY1-NEXT:    ret i32 [[TMP7]]
2062 //
2063 //
2064 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIdED1Ev
2065 // SIMD-ONLY1-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2066 // SIMD-ONLY1-NEXT:  entry:
2067 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2068 // SIMD-ONLY1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2069 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2070 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdED2Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]]) #[[ATTR4]]
2071 // SIMD-ONLY1-NEXT:    ret void
2072 //
2073 //
2074 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIdEC2Ev
2075 // SIMD-ONLY1-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2076 // SIMD-ONLY1-NEXT:  entry:
2077 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2078 // SIMD-ONLY1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2079 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2080 // SIMD-ONLY1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
2081 // SIMD-ONLY1-NEXT:    store double 0.000000e+00, double* [[F]], align 8
2082 // SIMD-ONLY1-NEXT:    ret void
2083 //
2084 //
2085 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIdED2Ev
2086 // SIMD-ONLY1-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2087 // SIMD-ONLY1-NEXT:  entry:
2088 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2089 // SIMD-ONLY1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2090 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2091 // SIMD-ONLY1-NEXT:    ret void
2092 //
2093 //
2094 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIdEC2ERKS0_d
2095 // SIMD-ONLY1-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[S:%.*]], double noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2096 // SIMD-ONLY1-NEXT:  entry:
2097 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2098 // SIMD-ONLY1-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S*, align 8
2099 // SIMD-ONLY1-NEXT:    [[T_ADDR:%.*]] = alloca double, align 8
2100 // SIMD-ONLY1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2101 // SIMD-ONLY1-NEXT:    store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8
2102 // SIMD-ONLY1-NEXT:    store double [[T]], double* [[T_ADDR]], align 8
2103 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2104 // SIMD-ONLY1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
2105 // SIMD-ONLY1-NEXT:    [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8
2106 // SIMD-ONLY1-NEXT:    [[F2:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 0
2107 // SIMD-ONLY1-NEXT:    [[TMP1:%.*]] = load double, double* [[F2]], align 8
2108 // SIMD-ONLY1-NEXT:    [[TMP2:%.*]] = load double, double* [[T_ADDR]], align 8
2109 // SIMD-ONLY1-NEXT:    [[ADD:%.*]] = fadd double [[TMP1]], [[TMP2]]
2110 // SIMD-ONLY1-NEXT:    store double [[ADD]], double* [[F]], align 8
2111 // SIMD-ONLY1-NEXT:    ret void
2112 //
2113 //
2114 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIdEC2Ed
2115 // SIMD-ONLY1-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], double noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2116 // SIMD-ONLY1-NEXT:  entry:
2117 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2118 // SIMD-ONLY1-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
2119 // SIMD-ONLY1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2120 // SIMD-ONLY1-NEXT:    store double [[A]], double* [[A_ADDR]], align 8
2121 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2122 // SIMD-ONLY1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
2123 // SIMD-ONLY1-NEXT:    [[TMP0:%.*]] = load double, double* [[A_ADDR]], align 8
2124 // SIMD-ONLY1-NEXT:    store double [[TMP0]], double* [[F]], align 8
2125 // SIMD-ONLY1-NEXT:    ret void
2126 //
2127 //
2128 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
2129 // SIMD-ONLY1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2130 // SIMD-ONLY1-NEXT:  entry:
2131 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2132 // SIMD-ONLY1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2133 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2134 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
2135 // SIMD-ONLY1-NEXT:    ret void
2136 //
2137 //
2138 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIiEC1ERKS0_i
2139 // SIMD-ONLY1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], i32 noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2140 // SIMD-ONLY1-NEXT:  entry:
2141 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2142 // SIMD-ONLY1-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8
2143 // SIMD-ONLY1-NEXT:    [[T_ADDR:%.*]] = alloca i32, align 4
2144 // SIMD-ONLY1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2145 // SIMD-ONLY1-NEXT:    store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8
2146 // SIMD-ONLY1-NEXT:    store i32 [[T]], i32* [[T_ADDR]], align 4
2147 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2148 // SIMD-ONLY1-NEXT:    [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8
2149 // SIMD-ONLY1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[T_ADDR]], align 4
2150 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiEC2ERKS0_i(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP0]], i32 noundef [[TMP1]])
2151 // SIMD-ONLY1-NEXT:    ret void
2152 //
2153 //
2154 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
2155 // SIMD-ONLY1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2156 // SIMD-ONLY1-NEXT:  entry:
2157 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2158 // SIMD-ONLY1-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
2159 // SIMD-ONLY1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2160 // SIMD-ONLY1-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
2161 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2162 // SIMD-ONLY1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
2163 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]])
2164 // SIMD-ONLY1-NEXT:    ret void
2165 //
2166 //
2167 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
2168 // SIMD-ONLY1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2169 // SIMD-ONLY1-NEXT:  entry:
2170 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2171 // SIMD-ONLY1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2172 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2173 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
2174 // SIMD-ONLY1-NEXT:    ret void
2175 //
2176 //
2177 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
2178 // SIMD-ONLY1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2179 // SIMD-ONLY1-NEXT:  entry:
2180 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2181 // SIMD-ONLY1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2182 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2183 // SIMD-ONLY1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
2184 // SIMD-ONLY1-NEXT:    store i32 0, i32* [[F]], align 4
2185 // SIMD-ONLY1-NEXT:    ret void
2186 //
2187 //
2188 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIiEC2ERKS0_i
2189 // SIMD-ONLY1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], i32 noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2190 // SIMD-ONLY1-NEXT:  entry:
2191 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2192 // SIMD-ONLY1-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8
2193 // SIMD-ONLY1-NEXT:    [[T_ADDR:%.*]] = alloca i32, align 4
2194 // SIMD-ONLY1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2195 // SIMD-ONLY1-NEXT:    store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8
2196 // SIMD-ONLY1-NEXT:    store i32 [[T]], i32* [[T_ADDR]], align 4
2197 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2198 // SIMD-ONLY1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
2199 // SIMD-ONLY1-NEXT:    [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8
2200 // SIMD-ONLY1-NEXT:    [[F2:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[TMP0]], i32 0, i32 0
2201 // SIMD-ONLY1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[F2]], align 4
2202 // SIMD-ONLY1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[T_ADDR]], align 4
2203 // SIMD-ONLY1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP2]]
2204 // SIMD-ONLY1-NEXT:    store i32 [[ADD]], i32* [[F]], align 4
2205 // SIMD-ONLY1-NEXT:    ret void
2206 //
2207 //
2208 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
2209 // SIMD-ONLY1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2210 // SIMD-ONLY1-NEXT:  entry:
2211 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2212 // SIMD-ONLY1-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
2213 // SIMD-ONLY1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2214 // SIMD-ONLY1-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
2215 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2216 // SIMD-ONLY1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
2217 // SIMD-ONLY1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
2218 // SIMD-ONLY1-NEXT:    store i32 [[TMP0]], i32* [[F]], align 4
2219 // SIMD-ONLY1-NEXT:    ret void
2220 //
2221 //
2222 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
2223 // SIMD-ONLY1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2224 // SIMD-ONLY1-NEXT:  entry:
2225 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2226 // SIMD-ONLY1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2227 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2228 // SIMD-ONLY1-NEXT:    ret void
2229 //
2230 //
2231 // SIMD-ONLY2-LABEL: define {{[^@]+}}@main
2232 // SIMD-ONLY2-SAME: () #[[ATTR0:[0-9]+]] {
2233 // SIMD-ONLY2-NEXT:  entry:
2234 // SIMD-ONLY2-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
2235 // SIMD-ONLY2-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
2236 // SIMD-ONLY2-NEXT:    store i32 0, i32* [[RETVAL]], align 4
2237 // SIMD-ONLY2-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 1 dereferenceable(1) [[REF_TMP]])
2238 // SIMD-ONLY2-NEXT:    ret i32 0
2239 //
2240 //
2241 // SIMD-ONLY3-LABEL: define {{[^@]+}}@main
2242 // SIMD-ONLY3-SAME: () #[[ATTR1:[0-9]+]] {
2243 // SIMD-ONLY3-NEXT:  entry:
2244 // SIMD-ONLY3-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
2245 // SIMD-ONLY3-NEXT:    store i32 0, i32* [[RETVAL]], align 4
2246 // SIMD-ONLY3-NEXT:    [[TMP0:%.*]] = load i8*, i8** getelementptr inbounds ([[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to %struct.__block_literal_generic*), i32 0, i32 3), align 8
2247 // SIMD-ONLY3-NEXT:    [[TMP1:%.*]] = bitcast i8* [[TMP0]] to void (i8*)*
2248 // SIMD-ONLY3-NEXT:    call void [[TMP1]](i8* noundef bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to i8*))
2249 // SIMD-ONLY3-NEXT:    ret i32 0
2250 //
2251 //
2252 // SIMD-ONLY3-LABEL: define {{[^@]+}}@__main_block_invoke
2253 // SIMD-ONLY3-SAME: (i8* noundef [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR2:[0-9]+]] {
2254 // SIMD-ONLY3-NEXT:  entry:
2255 // SIMD-ONLY3-NEXT:    [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
2256 // SIMD-ONLY3-NEXT:    [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*, align 8
2257 // SIMD-ONLY3-NEXT:    [[I:%.*]] = alloca i32, align 4
2258 // SIMD-ONLY3-NEXT:    [[BLOCK1:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, align 8
2259 // SIMD-ONLY3-NEXT:    store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
2260 // SIMD-ONLY3-NEXT:    [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*
2261 // SIMD-ONLY3-NEXT:    store <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>** [[BLOCK_ADDR]], align 8
2262 // SIMD-ONLY3-NEXT:    store i32 0, i32* [[I]], align 4
2263 // SIMD-ONLY3-NEXT:    br label [[FOR_COND:%.*]]
2264 // SIMD-ONLY3:       for.cond:
2265 // SIMD-ONLY3-NEXT:    [[TMP0:%.*]] = load i32, i32* [[I]], align 4
2266 // SIMD-ONLY3-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP0]], 10
2267 // SIMD-ONLY3-NEXT:    br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
2268 // SIMD-ONLY3:       for.body:
2269 // SIMD-ONLY3-NEXT:    store double 1.000000e+00, double* @g, align 8
2270 // SIMD-ONLY3-NEXT:    store i32 11, i32* @_ZZ4mainE5sivar, align 4
2271 // SIMD-ONLY3-NEXT:    [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK1]], i32 0, i32 0
2272 // SIMD-ONLY3-NEXT:    store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA]], align 8
2273 // SIMD-ONLY3-NEXT:    [[BLOCK_FLAGS:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK1]], i32 0, i32 1
2274 // SIMD-ONLY3-NEXT:    store i32 1073741824, i32* [[BLOCK_FLAGS]], align 8
2275 // SIMD-ONLY3-NEXT:    [[BLOCK_RESERVED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK1]], i32 0, i32 2
2276 // SIMD-ONLY3-NEXT:    store i32 0, i32* [[BLOCK_RESERVED]], align 4
2277 // SIMD-ONLY3-NEXT:    [[BLOCK_INVOKE:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK1]], i32 0, i32 3
2278 // SIMD-ONLY3-NEXT:    store i8* bitcast (void (i8*)* @__main_block_invoke_2 to i8*), i8** [[BLOCK_INVOKE]], align 8
2279 // SIMD-ONLY3-NEXT:    [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK1]], i32 0, i32 4
2280 // SIMD-ONLY3-NEXT:    store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp.1 to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8
2281 // SIMD-ONLY3-NEXT:    [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK1]], i32 0, i32 5
2282 // SIMD-ONLY3-NEXT:    [[TMP1:%.*]] = load volatile double, double* @g, align 8
2283 // SIMD-ONLY3-NEXT:    store volatile double [[TMP1]], double* [[BLOCK_CAPTURED]], align 8
2284 // SIMD-ONLY3-NEXT:    [[BLOCK_CAPTURED2:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK1]], i32 0, i32 6
2285 // SIMD-ONLY3-NEXT:    [[TMP2:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
2286 // SIMD-ONLY3-NEXT:    store i32 [[TMP2]], i32* [[BLOCK_CAPTURED2]], align 8
2287 // SIMD-ONLY3-NEXT:    [[TMP3:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK1]] to void ()*
2288 // SIMD-ONLY3-NEXT:    [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP3]] to %struct.__block_literal_generic*
2289 // SIMD-ONLY3-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
2290 // SIMD-ONLY3-NEXT:    [[TMP5:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
2291 // SIMD-ONLY3-NEXT:    [[TMP6:%.*]] = load i8*, i8** [[TMP4]], align 8
2292 // SIMD-ONLY3-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to void (i8*)*
2293 // SIMD-ONLY3-NEXT:    call void [[TMP7]](i8* noundef [[TMP5]])
2294 // SIMD-ONLY3-NEXT:    br label [[FOR_INC:%.*]]
2295 // SIMD-ONLY3:       for.inc:
2296 // SIMD-ONLY3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[I]], align 4
2297 // SIMD-ONLY3-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP8]], 1
2298 // SIMD-ONLY3-NEXT:    store i32 [[INC]], i32* [[I]], align 4
2299 // SIMD-ONLY3-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP2:![0-9]+]]
2300 // SIMD-ONLY3:       for.end:
2301 // SIMD-ONLY3-NEXT:    ret void
2302 //
2303 //
2304 // SIMD-ONLY3-LABEL: define {{[^@]+}}@__main_block_invoke_2
2305 // SIMD-ONLY3-SAME: (i8* noundef [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR2]] {
2306 // SIMD-ONLY3-NEXT:  entry:
2307 // SIMD-ONLY3-NEXT:    [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
2308 // SIMD-ONLY3-NEXT:    [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>*, align 8
2309 // SIMD-ONLY3-NEXT:    store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
2310 // SIMD-ONLY3-NEXT:    [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>*
2311 // SIMD-ONLY3-NEXT:    store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>** [[BLOCK_ADDR]], align 8
2312 // SIMD-ONLY3-NEXT:    [[BLOCK_CAPTURE_ADDR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK]], i32 0, i32 5
2313 // SIMD-ONLY3-NEXT:    store double 2.000000e+00, double* [[BLOCK_CAPTURE_ADDR]], align 8
2314 // SIMD-ONLY3-NEXT:    [[BLOCK_CAPTURE_ADDR1:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK]], i32 0, i32 6
2315 // SIMD-ONLY3-NEXT:    store i32 22, i32* [[BLOCK_CAPTURE_ADDR1]], align 8
2316 // SIMD-ONLY3-NEXT:    ret void
2317 //
2318 //
2319 // SIMD-ONLY4-LABEL: define {{[^@]+}}@_Z10array_funciPfP2St
2320 // SIMD-ONLY4-SAME: (i32 noundef [[N:%.*]], float* noundef [[A:%.*]], %struct.St* noundef [[S:%.*]]) #[[ATTR0:[0-9]+]] {
2321 // SIMD-ONLY4-NEXT:  entry:
2322 // SIMD-ONLY4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
2323 // SIMD-ONLY4-NEXT:    [[A_ADDR:%.*]] = alloca float*, align 8
2324 // SIMD-ONLY4-NEXT:    [[S_ADDR:%.*]] = alloca %struct.St*, align 8
2325 // SIMD-ONLY4-NEXT:    [[I:%.*]] = alloca i32, align 4
2326 // SIMD-ONLY4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
2327 // SIMD-ONLY4-NEXT:    store float* [[A]], float** [[A_ADDR]], align 8
2328 // SIMD-ONLY4-NEXT:    store %struct.St* [[S]], %struct.St** [[S_ADDR]], align 8
2329 // SIMD-ONLY4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
2330 // SIMD-ONLY4-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
2331 // SIMD-ONLY4-NEXT:    store i32 0, i32* [[I]], align 4
2332 // SIMD-ONLY4-NEXT:    br label [[FOR_COND:%.*]]
2333 // SIMD-ONLY4:       for.cond:
2334 // SIMD-ONLY4-NEXT:    [[TMP2:%.*]] = load i32, i32* [[I]], align 4
2335 // SIMD-ONLY4-NEXT:    [[CMP:%.*]] = icmp slt i32 [[TMP2]], 10
2336 // SIMD-ONLY4-NEXT:    br i1 [[CMP]], label [[FOR_BODY:%.*]], label [[FOR_END:%.*]]
2337 // SIMD-ONLY4:       for.body:
2338 // SIMD-ONLY4-NEXT:    br label [[FOR_INC:%.*]]
2339 // SIMD-ONLY4:       for.inc:
2340 // SIMD-ONLY4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[I]], align 4
2341 // SIMD-ONLY4-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP3]], 1
2342 // SIMD-ONLY4-NEXT:    store i32 [[INC]], i32* [[I]], align 4
2343 // SIMD-ONLY4-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP2:![0-9]+]]
2344 // SIMD-ONLY4:       for.end:
2345 // SIMD-ONLY4-NEXT:    ret void
2346 //
2347