xref: /llvm-project/clang/test/OpenMP/parallel_master_taskloop_simd_firstprivate_codegen.cpp (revision 65a0d669b4625c34775436a6d3643d15bbc2465a)
1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
2 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s
3 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
4 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
5 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=LAMBDA %s
6 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=BLOCKS %s
7 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -std=c++11 -DARRAY -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck -check-prefix=ARRAY %s
8 
9 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
10 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
11 // RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY1 %s
12 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY2 %s
13 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY3 %s
14 // RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -std=c++11 -DARRAY -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY4 %s
15 // expected-no-diagnostics
16 
17 #ifndef ARRAY
18 #ifndef HEADER
19 #define HEADER
20 
// Test payload type: has a user-provided default ctor, a converting ctor, a
// copy ctor with an extra defaulted parameter, a conversion operator, and a
// non-trivial dtor. The non-trivial copy ctor / dtor force the OpenMP
// firstprivate lowering to emit per-element construct/destruct calls, which
// is what the autogenerated CHECK lines below verify.
21 template <class T>
22 struct S {
23   T f;
24   S(T a) : f(a) {}
25   S() : f() {}
  // Copy ctor deliberately takes a defaulted second argument so the CHECK
  // lines can match the extra `double noundef 0.000000e+00` operand.
26   S(const S &s, T t = T()) : f(s.f + t) {}
27   operator T() { return T(); }
28   ~S() {}
29 };
30 
31 volatile double g;
32 
// Template variant of the test driver (instantiated as tmain<int> from main):
// exercises firstprivate on a scalar (over-aligned via attribute), a POD
// array, an array of non-trivially-copyable S<T>, and a single S<T> object.
33 template <typename T>
34 T tmain() {
35   S<T> ttt;
36   S<T> test(ttt);
  // Over-alignment checks that the privatized copy honors the alignment.
37   T t_var __attribute__((aligned(128))) = T();
38   T vec[] = {1, 2};
39   S<T> s_arr[] = {1, 2};
40   S<T> var(3);
  // NOTE: s_arr and var are deliberately listed twice — presumably exercising
  // duplicate-entry handling in the firstprivate clause (duplicates must not
  // produce duplicate private copies).
41 #pragma omp parallel master taskloop simd firstprivate(t_var, vec, s_arr, s_arr, var, var)
42   for (int i = 0; i < 10; ++i) {
43     vec[0] = t_var;
44     s_arr[0] = var;
45   }
46   return T();
47 }
48 
// Test driver with three build variants selected by -D macros on the RUN
// lines: LAMBDA (capture into a C++ lambda), BLOCKS (capture into an
// Objective-C block), and the default path (plain firstprivate codegen,
// then instantiation of tmain<int>).
49 int main() {
50   static int sivar;
51 #ifdef LAMBDA
  // LAMBDA variant: firstprivate of a volatile global (g) and a function-local
  // static (sivar), with a nested lambda re-capturing the private copies.
52   [&]() {
53 
54 
55 #pragma omp parallel master taskloop simd firstprivate(g, sivar)
56   for (int i = 0; i < 10; ++i) {
57 
58     g = 1;
59     sivar = 11;
60     [&]() {
61       g = 2;
62       sivar = 22;
63     }();
64   }
65   }();
66   return 0;
67 #elif defined(BLOCKS)
  // BLOCKS variant: same shape as the lambda test but using Clang blocks
  // (requires -fblocks on the RUN line).
68   ^{
69 
70 #pragma omp parallel master taskloop simd firstprivate(g, sivar)
71   for (int i = 0; i < 10; ++i) {
72 
73     g = 1;
74     sivar = 11;
75     ^{
76       g = 2;
77       sivar = 22;
78     }();
79   }
80   }();
81   return 0;
82 #else
  // Default variant: mirrors tmain with S<double>; note the clause again
  // repeats s_arr and var on purpose, and also privatizes the static sivar.
83   S<double> ttt;
84   S<double> test(ttt);
85   int t_var = 0;
86   int vec[] = {1, 2};
87   S<double> s_arr[] = {1, 2};
88   S<double> var(3);
89 #pragma omp parallel master taskloop simd firstprivate(var, t_var, s_arr, vec, s_arr, var, sivar)
90   for (int i = 0; i < 10; ++i) {
91     vec[0] = t_var;
92     s_arr[0] = var;
93     sivar = 33;
94   }
  // Triggers the tmain<int> instantiation checked by the second CHECK group.
95   return tmain<int>();
96 #endif
97 }
98 
99 
100 
101 // Store original variables in capture struct.
102 
103 // Allocate task.
104 // Returns struct kmp_task_t {
105 //         [[KMP_TASK_T]] task_data;
106 //         [[KMP_TASK_MAIN_TY]] privates;
107 //       };
108 
109 // Fill kmp_task_t->shareds by copying from original capture argument.
110 
111 // Initialize kmp_task_t->privates with default values (no init for simple types, default constructors for classes).
112 // Also copy address of private copy to the corresponding shareds reference.
113 
114 // Constructors for s_arr and var.
115 // s_arr;
116 
117 // var;
118 
119 // t_var;
120 
121 // vec;
122 
123 // sivar;
124 
125 // Provide pointer to destructor function, which will destroy private variables at the end of the task.
126 
127 // Start task.
128 
129 
130 
131 
132 
133 
134 // Privates actually are used.
135 
136 
137 
138 
139 
140 
141 
142 
143 // Store original variables in capture struct.
144 
145 // Allocate task.
146 // Returns struct kmp_task_t {
147 //         [[KMP_TASK_T_TY]] task_data;
148 //         [[KMP_TASK_TMAIN_TY]] privates;
149 //       };
150 
151 // Fill kmp_task_t->shareds by copying from original capture argument.
152 
153 // Initialize kmp_task_t->privates with default values (no init for simple types, default constructors for classes).
154 
155 // t_var;
156 
157 // vec;
158 
159 // Constructors for s_arr and var.
160 // s_arr;
161 
162 // var;
163 
164 // Provide pointer to destructor function, which will destroy private variables at the end of the task.
165 
166 // Start task.
167 
168 
169 
170 // Privates actually are used.
171 
172 
173 
174 
175 
176 
177 #endif
178 #else
// Non-trivially-copyable type for the ARRAY variant: user-provided copy ctor
// and dtor force element-wise copy construction when an St[] parameter is
// made firstprivate.
179 struct St {
180   int a, b;
181   St() : a(0), b(0) {}
182   St(const St &) {}
183   ~St() {}
184 };
185 
// ARRAY variant: array-typed parameters decay to pointers, so firstprivate
// of `a` and `s` here privatizes the pointer values themselves (a VLA-typed
// parameter and a fixed-size array of a non-trivial class type).
186 void array_func(int n, float a[n], St s[2]) {
187 #pragma omp parallel master taskloop simd firstprivate(a, s)
188   for (int i = 0; i < 10; ++i)
189     ;
190 }
191 #endif
192 
193 // CHECK-LABEL: define {{[^@]+}}@main
194 // CHECK-SAME: () #[[ATTR0:[0-9]+]] {
195 // CHECK-NEXT:  entry:
196 // CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
197 // CHECK-NEXT:    [[TTT:%.*]] = alloca [[STRUCT_S:%.*]], align 8
198 // CHECK-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S]], align 8
199 // CHECK-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
200 // CHECK-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
201 // CHECK-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S], align 16
202 // CHECK-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S]], align 8
203 // CHECK-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i64, align 8
204 // CHECK-NEXT:    store i32 0, i32* [[RETVAL]], align 4
205 // CHECK-NEXT:    call void @_ZN1SIdEC1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TTT]])
206 // CHECK-NEXT:    call void @_ZN1SIdEC1ERKS0_d(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TEST]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[TTT]], double noundef 0.000000e+00)
207 // CHECK-NEXT:    store i32 0, i32* [[T_VAR]], align 4
208 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
209 // CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i64 8, i1 false)
210 // CHECK-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
211 // CHECK-NEXT:    call void @_ZN1SIdEC1Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[ARRAYINIT_BEGIN]], double noundef 1.000000e+00)
212 // CHECK-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
213 // CHECK-NEXT:    call void @_ZN1SIdEC1Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[ARRAYINIT_ELEMENT]], double noundef 2.000000e+00)
214 // CHECK-NEXT:    call void @_ZN1SIdEC1Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[VAR]], double noundef 3.000000e+00)
215 // CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[T_VAR]], align 4
216 // CHECK-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_CASTED]] to i32*
217 // CHECK-NEXT:    store i32 [[TMP1]], i32* [[CONV]], align 4
218 // CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* [[T_VAR_CASTED]], align 8
219 // CHECK-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i64, [2 x %struct.S]*, %struct.S*)* @.omp_outlined. to void (i32*, i32*, ...)*), [2 x i32]* [[VEC]], i64 [[TMP2]], [2 x %struct.S]* [[S_ARR]], %struct.S* [[VAR]])
220 // CHECK-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
221 // CHECK-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
222 // CHECK-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[VAR]]) #[[ATTR4:[0-9]+]]
223 // CHECK-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
224 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
225 // CHECK-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
226 // CHECK:       arraydestroy.body:
227 // CHECK-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP3]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
228 // CHECK-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
229 // CHECK-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
230 // CHECK-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
231 // CHECK-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
232 // CHECK:       arraydestroy.done1:
233 // CHECK-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TEST]]) #[[ATTR4]]
234 // CHECK-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TTT]]) #[[ATTR4]]
235 // CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[RETVAL]], align 4
236 // CHECK-NEXT:    ret i32 [[TMP4]]
237 //
238 //
239 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIdEC1Ev
240 // CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
241 // CHECK-NEXT:  entry:
242 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
243 // CHECK-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
244 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
245 // CHECK-NEXT:    call void @_ZN1SIdEC2Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]])
246 // CHECK-NEXT:    ret void
247 //
248 //
249 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIdEC1ERKS0_d
250 // CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[S:%.*]], double noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
251 // CHECK-NEXT:  entry:
252 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
253 // CHECK-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S*, align 8
254 // CHECK-NEXT:    [[T_ADDR:%.*]] = alloca double, align 8
255 // CHECK-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
256 // CHECK-NEXT:    store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8
257 // CHECK-NEXT:    store double [[T]], double* [[T_ADDR]], align 8
258 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
259 // CHECK-NEXT:    [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8
260 // CHECK-NEXT:    [[TMP1:%.*]] = load double, double* [[T_ADDR]], align 8
261 // CHECK-NEXT:    call void @_ZN1SIdEC2ERKS0_d(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[TMP0]], double noundef [[TMP1]])
262 // CHECK-NEXT:    ret void
263 //
264 //
265 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIdEC1Ed
266 // CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], double noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
267 // CHECK-NEXT:  entry:
268 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
269 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
270 // CHECK-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
271 // CHECK-NEXT:    store double [[A]], double* [[A_ADDR]], align 8
272 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
273 // CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[A_ADDR]], align 8
274 // CHECK-NEXT:    call void @_ZN1SIdEC2Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]], double noundef [[TMP0]])
275 // CHECK-NEXT:    ret void
276 //
277 //
278 // CHECK-LABEL: define {{[^@]+}}@.omp_outlined.
279 // CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [2 x i32]* noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]], i64 noundef [[T_VAR:%.*]], [2 x %struct.S]* noundef nonnull align 8 dereferenceable(16) [[S_ARR:%.*]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[VAR:%.*]]) #[[ATTR3:[0-9]+]] {
280 // CHECK-NEXT:  entry:
281 // CHECK-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
282 // CHECK-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
283 // CHECK-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
284 // CHECK-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i64, align 8
285 // CHECK-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S]*, align 8
286 // CHECK-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S*, align 8
287 // CHECK-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
288 // CHECK-NEXT:    [[TMP:%.*]] = alloca i32, align 4
289 // CHECK-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
290 // CHECK-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
291 // CHECK-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
292 // CHECK-NEXT:    store i64 [[T_VAR]], i64* [[T_VAR_ADDR]], align 8
293 // CHECK-NEXT:    store [2 x %struct.S]* [[S_ARR]], [2 x %struct.S]** [[S_ARR_ADDR]], align 8
294 // CHECK-NEXT:    store %struct.S* [[VAR]], %struct.S** [[VAR_ADDR]], align 8
295 // CHECK-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
296 // CHECK-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_ADDR]] to i32*
297 // CHECK-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[S_ARR_ADDR]], align 8
298 // CHECK-NEXT:    [[TMP2:%.*]] = load %struct.S*, %struct.S** [[VAR_ADDR]], align 8
299 // CHECK-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
300 // CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
301 // CHECK-NEXT:    [[TMP5:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
302 // CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
303 // CHECK-NEXT:    br i1 [[TMP6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
304 // CHECK:       omp_if.then:
305 // CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
306 // CHECK-NEXT:    store [2 x %struct.S]* [[TMP1]], [2 x %struct.S]** [[TMP7]], align 8
307 // CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 1
308 // CHECK-NEXT:    store %struct.S* [[TMP2]], %struct.S** [[TMP8]], align 8
309 // CHECK-NEXT:    call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
310 // CHECK-NEXT:    [[TMP9:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i32 9, i64 120, i64 16, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
311 // CHECK-NEXT:    [[TMP10:%.*]] = bitcast i8* [[TMP9]] to %struct.kmp_task_t_with_privates*
312 // CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP10]], i32 0, i32 0
313 // CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 0
314 // CHECK-NEXT:    [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 8
315 // CHECK-NEXT:    [[TMP14:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
316 // CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP13]], i8* align 8 [[TMP14]], i64 16, i1 false)
317 // CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP10]], i32 0, i32 1
318 // CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP15]], i32 0, i32 0
319 // CHECK-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP16]], i32 0, i32 0
320 // CHECK-NEXT:    [[TMP17:%.*]] = bitcast [2 x %struct.S]* [[TMP1]] to %struct.S*
321 // CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 2
322 // CHECK-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN]], [[TMP18]]
323 // CHECK-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE1:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
324 // CHECK:       omp.arraycpy.body:
325 // CHECK-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP17]], [[OMP_IF_THEN]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
326 // CHECK-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[OMP_IF_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
327 // CHECK-NEXT:    call void @_ZN1SIdEC1ERKS0_d(%struct.S* noundef nonnull align 8 dereferenceable(8) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[OMP_ARRAYCPY_SRCELEMENTPAST]], double noundef 0.000000e+00)
328 // CHECK-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
329 // CHECK-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
330 // CHECK-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP18]]
331 // CHECK-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE1]], label [[OMP_ARRAYCPY_BODY]]
332 // CHECK:       omp.arraycpy.done1:
333 // CHECK-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP15]], i32 0, i32 1
334 // CHECK-NEXT:    call void @_ZN1SIdEC1ERKS0_d(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TMP19]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[TMP2]], double noundef 0.000000e+00)
335 // CHECK-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP15]], i32 0, i32 2
336 // CHECK-NEXT:    [[TMP21:%.*]] = load i32, i32* [[CONV]], align 4
337 // CHECK-NEXT:    store i32 [[TMP21]], i32* [[TMP20]], align 8
338 // CHECK-NEXT:    [[TMP22:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP15]], i32 0, i32 3
339 // CHECK-NEXT:    [[TMP23:%.*]] = bitcast [2 x i32]* [[TMP22]] to i8*
340 // CHECK-NEXT:    [[TMP24:%.*]] = bitcast [2 x i32]* [[TMP0]] to i8*
341 // CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP23]], i8* align 4 [[TMP24]], i64 8, i1 false)
342 // CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP15]], i32 0, i32 4
343 // CHECK-NEXT:    [[TMP26:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
344 // CHECK-NEXT:    store i32 [[TMP26]], i32* [[TMP25]], align 4
345 // CHECK-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 3
346 // CHECK-NEXT:    [[TMP28:%.*]] = bitcast %union.kmp_cmplrdata_t* [[TMP27]] to i32 (i32, i8*)**
347 // CHECK-NEXT:    store i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_destructor. to i32 (i32, i8*)*), i32 (i32, i8*)** [[TMP28]], align 8
348 // CHECK-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 5
349 // CHECK-NEXT:    store i64 0, i64* [[TMP29]], align 8
350 // CHECK-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 6
351 // CHECK-NEXT:    store i64 9, i64* [[TMP30]], align 8
352 // CHECK-NEXT:    [[TMP31:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 7
353 // CHECK-NEXT:    store i64 1, i64* [[TMP31]], align 8
354 // CHECK-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 9
355 // CHECK-NEXT:    [[TMP33:%.*]] = bitcast i8** [[TMP32]] to i8*
356 // CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP33]], i8 0, i64 8, i1 false)
357 // CHECK-NEXT:    [[TMP34:%.*]] = load i64, i64* [[TMP31]], align 8
358 // CHECK-NEXT:    call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i8* [[TMP9]], i32 1, i64* [[TMP29]], i64* [[TMP30]], i64 [[TMP34]], i32 1, i32 0, i64 0, i8* bitcast (void (%struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates*, i32)* @.omp_task_dup. to i8*))
359 // CHECK-NEXT:    call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
360 // CHECK-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
361 // CHECK-NEXT:    br label [[OMP_IF_END]]
362 // CHECK:       omp_if.end:
363 // CHECK-NEXT:    ret void
364 //
365 //
366 // CHECK-LABEL: define {{[^@]+}}@.omp_task_privates_map.
367 // CHECK-SAME: (%struct..kmp_privates.t* noalias noundef [[TMP0:%.*]], %struct.S** noalias noundef [[TMP1:%.*]], i32** noalias noundef [[TMP2:%.*]], [2 x %struct.S]** noalias noundef [[TMP3:%.*]], [2 x i32]** noalias noundef [[TMP4:%.*]], i32** noalias noundef [[TMP5:%.*]]) #[[ATTR6:[0-9]+]] {
368 // CHECK-NEXT:  entry:
369 // CHECK-NEXT:    [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8
370 // CHECK-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.S**, align 8
371 // CHECK-NEXT:    [[DOTADDR2:%.*]] = alloca i32**, align 8
372 // CHECK-NEXT:    [[DOTADDR3:%.*]] = alloca [2 x %struct.S]**, align 8
373 // CHECK-NEXT:    [[DOTADDR4:%.*]] = alloca [2 x i32]**, align 8
374 // CHECK-NEXT:    [[DOTADDR5:%.*]] = alloca i32**, align 8
375 // CHECK-NEXT:    store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8
376 // CHECK-NEXT:    store %struct.S** [[TMP1]], %struct.S*** [[DOTADDR1]], align 8
377 // CHECK-NEXT:    store i32** [[TMP2]], i32*** [[DOTADDR2]], align 8
378 // CHECK-NEXT:    store [2 x %struct.S]** [[TMP3]], [2 x %struct.S]*** [[DOTADDR3]], align 8
379 // CHECK-NEXT:    store [2 x i32]** [[TMP4]], [2 x i32]*** [[DOTADDR4]], align 8
380 // CHECK-NEXT:    store i32** [[TMP5]], i32*** [[DOTADDR5]], align 8
381 // CHECK-NEXT:    [[TMP6:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8
382 // CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP6]], i32 0, i32 0
383 // CHECK-NEXT:    [[TMP8:%.*]] = load [2 x %struct.S]**, [2 x %struct.S]*** [[DOTADDR3]], align 8
384 // CHECK-NEXT:    store [2 x %struct.S]* [[TMP7]], [2 x %struct.S]** [[TMP8]], align 8
385 // CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP6]], i32 0, i32 1
386 // CHECK-NEXT:    [[TMP10:%.*]] = load %struct.S**, %struct.S*** [[DOTADDR1]], align 8
387 // CHECK-NEXT:    store %struct.S* [[TMP9]], %struct.S** [[TMP10]], align 8
388 // CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP6]], i32 0, i32 2
389 // CHECK-NEXT:    [[TMP12:%.*]] = load i32**, i32*** [[DOTADDR2]], align 8
390 // CHECK-NEXT:    store i32* [[TMP11]], i32** [[TMP12]], align 8
391 // CHECK-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP6]], i32 0, i32 3
392 // CHECK-NEXT:    [[TMP14:%.*]] = load [2 x i32]**, [2 x i32]*** [[DOTADDR4]], align 8
393 // CHECK-NEXT:    store [2 x i32]* [[TMP13]], [2 x i32]** [[TMP14]], align 8
394 // CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP6]], i32 0, i32 4
395 // CHECK-NEXT:    [[TMP16:%.*]] = load i32**, i32*** [[DOTADDR5]], align 8
396 // CHECK-NEXT:    store i32* [[TMP15]], i32** [[TMP16]], align 8
397 // CHECK-NEXT:    ret void
398 //
399 //
400 // CHECK-LABEL: define {{[^@]+}}@.omp_task_entry.
401 // CHECK-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR7:[0-9]+]] {
402 // CHECK-NEXT:  entry:
403 // CHECK-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
404 // CHECK-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
405 // CHECK-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
406 // CHECK-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
407 // CHECK-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
408 // CHECK-NEXT:    [[DOTLB__ADDR_I:%.*]] = alloca i64, align 8
409 // CHECK-NEXT:    [[DOTUB__ADDR_I:%.*]] = alloca i64, align 8
410 // CHECK-NEXT:    [[DOTST__ADDR_I:%.*]] = alloca i64, align 8
411 // CHECK-NEXT:    [[DOTLITER__ADDR_I:%.*]] = alloca i32, align 4
412 // CHECK-NEXT:    [[DOTREDUCTIONS__ADDR_I:%.*]] = alloca i8*, align 8
413 // CHECK-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
414 // CHECK-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca %struct.S*, align 8
415 // CHECK-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca i32*, align 8
416 // CHECK-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR2_I:%.*]] = alloca [2 x %struct.S]*, align 8
417 // CHECK-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR3_I:%.*]] = alloca [2 x i32]*, align 8
418 // CHECK-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR4_I:%.*]] = alloca i32*, align 8
419 // CHECK-NEXT:    [[I_I:%.*]] = alloca i32, align 4
420 // CHECK-NEXT:    [[DOTOMP_IV_I:%.*]] = alloca i32, align 4
421 // CHECK-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
422 // CHECK-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
423 // CHECK-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
424 // CHECK-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
425 // CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
426 // CHECK-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
427 // CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
428 // CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
429 // CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
430 // CHECK-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
431 // CHECK-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
432 // CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
433 // CHECK-NEXT:    [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
434 // CHECK-NEXT:    [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
435 // CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 5
436 // CHECK-NEXT:    [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8
437 // CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 6
438 // CHECK-NEXT:    [[TMP15:%.*]] = load i64, i64* [[TMP14]], align 8
439 // CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 7
440 // CHECK-NEXT:    [[TMP17:%.*]] = load i64, i64* [[TMP16]], align 8
441 // CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 8
442 // CHECK-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 8
443 // CHECK-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 9
444 // CHECK-NEXT:    [[TMP21:%.*]] = load i8*, i8** [[TMP20]], align 8
445 // CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
446 // CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
447 // CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]])
448 // CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]])
449 // CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]])
450 // CHECK-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14
451 // CHECK-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !14
452 // CHECK-NEXT:    store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
453 // CHECK-NEXT:    store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, %struct.S**, i32**, [2 x %struct.S]**, [2 x i32]**, i32**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
454 // CHECK-NEXT:    store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !14
455 // CHECK-NEXT:    store i64 [[TMP13]], i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
456 // CHECK-NEXT:    store i64 [[TMP15]], i64* [[DOTUB__ADDR_I]], align 8, !noalias !14
457 // CHECK-NEXT:    store i64 [[TMP17]], i64* [[DOTST__ADDR_I]], align 8, !noalias !14
458 // CHECK-NEXT:    store i32 [[TMP19]], i32* [[DOTLITER__ADDR_I]], align 4, !noalias !14
459 // CHECK-NEXT:    store i8* [[TMP21]], i8** [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14
460 // CHECK-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
461 // CHECK-NEXT:    [[TMP22:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
462 // CHECK-NEXT:    [[TMP23:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
463 // CHECK-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
464 // CHECK-NEXT:    [[TMP25:%.*]] = bitcast void (i8*, ...)* [[TMP23]] to void (i8*, %struct.S**, i32**, [2 x %struct.S]**, [2 x i32]**, i32**)*
465 // CHECK-NEXT:    call void [[TMP25]](i8* [[TMP24]], %struct.S** [[DOTFIRSTPRIV_PTR_ADDR_I]], i32** [[DOTFIRSTPRIV_PTR_ADDR1_I]], [2 x %struct.S]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], [2 x i32]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], i32** [[DOTFIRSTPRIV_PTR_ADDR4_I]]) #[[ATTR4]]
466 // CHECK-NEXT:    [[TMP26:%.*]] = load %struct.S*, %struct.S** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !14
467 // CHECK-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !14
468 // CHECK-NEXT:    [[TMP28:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !14
469 // CHECK-NEXT:    [[TMP29:%.*]] = load [2 x i32]*, [2 x i32]** [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !14
470 // CHECK-NEXT:    [[TMP30:%.*]] = load i32*, i32** [[DOTFIRSTPRIV_PTR_ADDR4_I]], align 8, !noalias !14
471 // CHECK-NEXT:    [[TMP31:%.*]] = load i64, i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
472 // CHECK-NEXT:    [[CONV_I:%.*]] = trunc i64 [[TMP31]] to i32
473 // CHECK-NEXT:    store i32 [[CONV_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14
474 // CHECK-NEXT:    br label [[OMP_INNER_FOR_COND_I:%.*]]
475 // CHECK:       omp.inner.for.cond.i:
476 // CHECK-NEXT:    [[TMP32:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15:![0-9]+]]
477 // CHECK-NEXT:    [[CONV5_I:%.*]] = sext i32 [[TMP32]] to i64
478 // CHECK-NEXT:    [[TMP33:%.*]] = load i64, i64* [[DOTUB__ADDR_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]]
479 // CHECK-NEXT:    [[CMP_I:%.*]] = icmp ule i64 [[CONV5_I]], [[TMP33]]
480 // CHECK-NEXT:    br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
481 // CHECK:       omp.inner.for.body.i:
482 // CHECK-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]]
483 // CHECK-NEXT:    store i32 [[TMP34]], i32* [[I_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]]
484 // CHECK-NEXT:    [[TMP35:%.*]] = load i32, i32* [[TMP27]], align 4, !llvm.access.group [[ACC_GRP15]]
485 // CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP29]], i64 0, i64 0
486 // CHECK-NEXT:    store i32 [[TMP35]], i32* [[ARRAYIDX_I]], align 4, !llvm.access.group [[ACC_GRP15]]
487 // CHECK-NEXT:    [[ARRAYIDX6_I:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP28]], i64 0, i64 0
488 // CHECK-NEXT:    [[TMP36:%.*]] = bitcast %struct.S* [[ARRAYIDX6_I]] to i8*
489 // CHECK-NEXT:    [[TMP37:%.*]] = bitcast %struct.S* [[TMP26]] to i8*
490 // CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP36]], i8* align 8 [[TMP37]], i64 8, i1 false), !llvm.access.group [[ACC_GRP15]]
491 // CHECK-NEXT:    store i32 33, i32* [[TMP30]], align 4, !llvm.access.group [[ACC_GRP15]]
492 // CHECK-NEXT:    [[TMP38:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]]
493 // CHECK-NEXT:    [[ADD7_I:%.*]] = add nsw i32 [[TMP38]], 1
494 // CHECK-NEXT:    store i32 [[ADD7_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]]
495 // CHECK-NEXT:    br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP16:![0-9]+]]
496 // CHECK:       .omp_outlined..1.exit:
497 // CHECK-NEXT:    ret i32 0
498 //
499 //
500 // CHECK-LABEL: define {{[^@]+}}@.omp_task_dup.
501 // CHECK-SAME: (%struct.kmp_task_t_with_privates* noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noundef [[TMP1:%.*]], i32 noundef [[TMP2:%.*]]) #[[ATTR7]] {
502 // CHECK-NEXT:  entry:
503 // CHECK-NEXT:    [[DOTADDR:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
504 // CHECK-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
505 // CHECK-NEXT:    [[DOTADDR2:%.*]] = alloca i32, align 4
506 // CHECK-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP0]], %struct.kmp_task_t_with_privates** [[DOTADDR]], align 8
507 // CHECK-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
508 // CHECK-NEXT:    store i32 [[TMP2]], i32* [[DOTADDR2]], align 4
509 // CHECK-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR]], align 8
510 // CHECK-NEXT:    [[TMP4:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
511 // CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP4]], i32 0, i32 0
512 // CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP5]], i32 0, i32 0
513 // CHECK-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
514 // CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
515 // CHECK-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
516 // CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP8]], i32 0, i32 0
517 // CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP9]], i32 0, i32 0
518 // CHECK-NEXT:    [[TMP12:%.*]] = load [2 x %struct.S]*, [2 x %struct.S]** [[TMP11]], align 8
519 // CHECK-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP10]], i32 0, i32 0
520 // CHECK-NEXT:    [[TMP13:%.*]] = bitcast [2 x %struct.S]* [[TMP12]] to %struct.S*
521 // CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 2
522 // CHECK-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S* [[ARRAY_BEGIN]], [[TMP14]]
523 // CHECK-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE3:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
524 // CHECK:       omp.arraycpy.body:
525 // CHECK-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP13]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
526 // CHECK-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S* [ [[ARRAY_BEGIN]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
527 // CHECK-NEXT:    call void @_ZN1SIdEC1ERKS0_d(%struct.S* noundef nonnull align 8 dereferenceable(8) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[OMP_ARRAYCPY_SRCELEMENTPAST]], double noundef 0.000000e+00)
528 // CHECK-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
529 // CHECK-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S]], %struct.S* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
530 // CHECK-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP14]]
531 // CHECK-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE3]], label [[OMP_ARRAYCPY_BODY]]
532 // CHECK:       omp.arraycpy.done3:
533 // CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP8]], i32 0, i32 1
534 // CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[TMP9]], i32 0, i32 1
535 // CHECK-NEXT:    [[TMP17:%.*]] = load %struct.S*, %struct.S** [[TMP16]], align 8
536 // CHECK-NEXT:    call void @_ZN1SIdEC1ERKS0_d(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TMP15]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[TMP17]], double noundef 0.000000e+00)
537 // CHECK-NEXT:    ret void
538 //
539 //
540 // CHECK-LABEL: define {{[^@]+}}@.omp_task_destructor.
541 // CHECK-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR7]] {
542 // CHECK-NEXT:  entry:
543 // CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
544 // CHECK-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
545 // CHECK-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
546 // CHECK-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
547 // CHECK-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
548 // CHECK-NEXT:    [[TMP2:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
549 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP2]], i32 0, i32 1
550 // CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 0
551 // CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 1
552 // CHECK-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TMP5]]) #[[ATTR4]]
553 // CHECK-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[TMP4]], i32 0, i32 0
554 // CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[ARRAY_BEGIN]], i64 2
555 // CHECK-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
556 // CHECK:       arraydestroy.body:
557 // CHECK-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP6]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
558 // CHECK-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
559 // CHECK-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
560 // CHECK-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
561 // CHECK-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
562 // CHECK:       arraydestroy.done2:
563 // CHECK-NEXT:    [[TMP7:%.*]] = load i32, i32* [[RETVAL]], align 4
564 // CHECK-NEXT:    ret i32 [[TMP7]]
565 //
566 //
567 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIdED1Ev
568 // CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
569 // CHECK-NEXT:  entry:
570 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
571 // CHECK-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
572 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
573 // CHECK-NEXT:    call void @_ZN1SIdED2Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]]) #[[ATTR4]]
574 // CHECK-NEXT:    ret void
575 //
576 //
577 // CHECK-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
578 // CHECK-SAME: () #[[ATTR9:[0-9]+]] {
579 // CHECK-NEXT:  entry:
580 // CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
581 // CHECK-NEXT:    [[TTT:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
582 // CHECK-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0]], align 4
583 // CHECK-NEXT:    [[T_VAR:%.*]] = alloca i32, align 128
584 // CHECK-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
585 // CHECK-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
586 // CHECK-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S_0]], align 4
587 // CHECK-NEXT:    [[T_VAR_CASTED:%.*]] = alloca i64, align 8
588 // CHECK-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TTT]])
589 // CHECK-NEXT:    call void @_ZN1SIiEC1ERKS0_i(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TTT]], i32 noundef 0)
590 // CHECK-NEXT:    store i32 0, i32* [[T_VAR]], align 128
591 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
592 // CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
593 // CHECK-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
594 // CHECK-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 noundef 1)
595 // CHECK-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
596 // CHECK-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
597 // CHECK-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]], i32 noundef 3)
598 // CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[T_VAR]], align 128
599 // CHECK-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_CASTED]] to i32*
600 // CHECK-NEXT:    store i32 [[TMP1]], i32* [[CONV]], align 4
601 // CHECK-NEXT:    [[TMP2:%.*]] = load i64, i64* [[T_VAR_CASTED]], align 8
602 // CHECK-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1]], i32 4, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, [2 x i32]*, i64, [2 x %struct.S.0]*, %struct.S.0*)* @.omp_outlined..2 to void (i32*, i32*, ...)*), [2 x i32]* [[VEC]], i64 [[TMP2]], [2 x %struct.S.0]* [[S_ARR]], %struct.S.0* [[VAR]])
603 // CHECK-NEXT:    store i32 0, i32* [[RETVAL]], align 4
604 // CHECK-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
605 // CHECK-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
606 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
607 // CHECK-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
608 // CHECK:       arraydestroy.body:
609 // CHECK-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP3]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
610 // CHECK-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
611 // CHECK-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
612 // CHECK-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
613 // CHECK-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]]
614 // CHECK:       arraydestroy.done1:
615 // CHECK-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
616 // CHECK-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TTT]]) #[[ATTR4]]
617 // CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[RETVAL]], align 4
618 // CHECK-NEXT:    ret i32 [[TMP4]]
619 //
620 //
621 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIdEC2Ev
622 // CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
623 // CHECK-NEXT:  entry:
624 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
625 // CHECK-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
626 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
627 // CHECK-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
628 // CHECK-NEXT:    store double 0.000000e+00, double* [[F]], align 8
629 // CHECK-NEXT:    ret void
630 //
631 //
632 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIdEC2ERKS0_d
633 // CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[S:%.*]], double noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
634 // CHECK-NEXT:  entry:
635 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
636 // CHECK-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S*, align 8
637 // CHECK-NEXT:    [[T_ADDR:%.*]] = alloca double, align 8
638 // CHECK-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
639 // CHECK-NEXT:    store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8
640 // CHECK-NEXT:    store double [[T]], double* [[T_ADDR]], align 8
641 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
642 // CHECK-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
643 // CHECK-NEXT:    [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8
644 // CHECK-NEXT:    [[F2:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 0
645 // CHECK-NEXT:    [[TMP1:%.*]] = load double, double* [[F2]], align 8
646 // CHECK-NEXT:    [[TMP2:%.*]] = load double, double* [[T_ADDR]], align 8
647 // CHECK-NEXT:    [[ADD:%.*]] = fadd double [[TMP1]], [[TMP2]]
648 // CHECK-NEXT:    store double [[ADD]], double* [[F]], align 8
649 // CHECK-NEXT:    ret void
650 //
651 //
652 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIdEC2Ed
653 // CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], double noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
654 // CHECK-NEXT:  entry:
655 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
656 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
657 // CHECK-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
658 // CHECK-NEXT:    store double [[A]], double* [[A_ADDR]], align 8
659 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
660 // CHECK-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
661 // CHECK-NEXT:    [[TMP0:%.*]] = load double, double* [[A_ADDR]], align 8
662 // CHECK-NEXT:    store double [[TMP0]], double* [[F]], align 8
663 // CHECK-NEXT:    ret void
664 //
665 //
666 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIdED2Ev
667 // CHECK-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
668 // CHECK-NEXT:  entry:
669 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
670 // CHECK-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
671 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
672 // CHECK-NEXT:    ret void
673 //
674 //
675 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
676 // CHECK-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
677 // CHECK-NEXT:  entry:
678 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
679 // CHECK-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
680 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
681 // CHECK-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
682 // CHECK-NEXT:    ret void
683 //
684 //
685 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIiEC1ERKS0_i
686 // CHECK-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], i32 noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
687 // CHECK-NEXT:  entry:
688 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
689 // CHECK-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8
690 // CHECK-NEXT:    [[T_ADDR:%.*]] = alloca i32, align 4
691 // CHECK-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
692 // CHECK-NEXT:    store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8
693 // CHECK-NEXT:    store i32 [[T]], i32* [[T_ADDR]], align 4
694 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
695 // CHECK-NEXT:    [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8
696 // CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[T_ADDR]], align 4
697 // CHECK-NEXT:    call void @_ZN1SIiEC2ERKS0_i(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP0]], i32 noundef [[TMP1]])
698 // CHECK-NEXT:    ret void
699 //
700 //
701 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
702 // CHECK-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
703 // CHECK-NEXT:  entry:
704 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
705 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
706 // CHECK-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
707 // CHECK-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
708 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
709 // CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
710 // CHECK-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]])
711 // CHECK-NEXT:    ret void
712 //
713 //
714 // CHECK-LABEL: define {{[^@]+}}@.omp_outlined..2
715 // CHECK-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], [2 x i32]* noundef nonnull align 4 dereferenceable(8) [[VEC:%.*]], i64 noundef [[T_VAR:%.*]], [2 x %struct.S.0]* noundef nonnull align 4 dereferenceable(8) [[S_ARR:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR:%.*]]) #[[ATTR3]] {
716 // CHECK-NEXT:  entry:
717 // CHECK-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
718 // CHECK-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
719 // CHECK-NEXT:    [[VEC_ADDR:%.*]] = alloca [2 x i32]*, align 8
720 // CHECK-NEXT:    [[T_VAR_ADDR:%.*]] = alloca i64, align 8
721 // CHECK-NEXT:    [[S_ARR_ADDR:%.*]] = alloca [2 x %struct.S.0]*, align 8
722 // CHECK-NEXT:    [[VAR_ADDR:%.*]] = alloca %struct.S.0*, align 8
723 // CHECK-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON_1:%.*]], align 8
724 // CHECK-NEXT:    [[TMP:%.*]] = alloca i32, align 4
725 // CHECK-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
726 // CHECK-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
727 // CHECK-NEXT:    store [2 x i32]* [[VEC]], [2 x i32]** [[VEC_ADDR]], align 8
728 // CHECK-NEXT:    store i64 [[T_VAR]], i64* [[T_VAR_ADDR]], align 8
729 // CHECK-NEXT:    store [2 x %struct.S.0]* [[S_ARR]], [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
730 // CHECK-NEXT:    store %struct.S.0* [[VAR]], %struct.S.0** [[VAR_ADDR]], align 8
731 // CHECK-NEXT:    [[TMP0:%.*]] = load [2 x i32]*, [2 x i32]** [[VEC_ADDR]], align 8
732 // CHECK-NEXT:    [[CONV:%.*]] = bitcast i64* [[T_VAR_ADDR]] to i32*
733 // CHECK-NEXT:    [[TMP1:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[S_ARR_ADDR]], align 8
734 // CHECK-NEXT:    [[TMP2:%.*]] = load %struct.S.0*, %struct.S.0** [[VAR_ADDR]], align 8
735 // CHECK-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
736 // CHECK-NEXT:    [[TMP4:%.*]] = load i32, i32* [[TMP3]], align 4
737 // CHECK-NEXT:    [[TMP5:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
738 // CHECK-NEXT:    [[TMP6:%.*]] = icmp ne i32 [[TMP5]], 0
739 // CHECK-NEXT:    br i1 [[TMP6]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
740 // CHECK:       omp_if.then:
741 // CHECK-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[AGG_CAPTURED]], i32 0, i32 0
742 // CHECK-NEXT:    store [2 x %struct.S.0]* [[TMP1]], [2 x %struct.S.0]** [[TMP7]], align 8
743 // CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[AGG_CAPTURED]], i32 0, i32 1
744 // CHECK-NEXT:    store %struct.S.0* [[TMP2]], %struct.S.0** [[TMP8]], align 8
745 // CHECK-NEXT:    call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
746 // CHECK-NEXT:    [[TMP9:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i32 9, i64 256, i64 16, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates.2*)* @.omp_task_entry..5 to i32 (i32, i8*)*))
747 // CHECK-NEXT:    [[TMP10:%.*]] = bitcast i8* [[TMP9]] to %struct.kmp_task_t_with_privates.2*
748 // CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2:%.*]], %struct.kmp_task_t_with_privates.2* [[TMP10]], i32 0, i32 0
749 // CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 0
750 // CHECK-NEXT:    [[TMP13:%.*]] = load i8*, i8** [[TMP12]], align 128
751 // CHECK-NEXT:    [[TMP14:%.*]] = bitcast %struct.anon.1* [[AGG_CAPTURED]] to i8*
752 // CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP13]], i8* align 8 [[TMP14]], i64 16, i1 false)
753 // CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2]], %struct.kmp_task_t_with_privates.2* [[TMP10]], i32 0, i32 2
754 // CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3:%.*]], %struct..kmp_privates.t.3* [[TMP15]], i32 0, i32 0
755 // CHECK-NEXT:    [[TMP17:%.*]] = load i32, i32* [[CONV]], align 4
756 // CHECK-NEXT:    store i32 [[TMP17]], i32* [[TMP16]], align 128
757 // CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3]], %struct..kmp_privates.t.3* [[TMP15]], i32 0, i32 1
758 // CHECK-NEXT:    [[TMP19:%.*]] = bitcast [2 x i32]* [[TMP18]] to i8*
759 // CHECK-NEXT:    [[TMP20:%.*]] = bitcast [2 x i32]* [[TMP0]] to i8*
760 // CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP19]], i8* align 4 [[TMP20]], i64 8, i1 false)
761 // CHECK-NEXT:    [[TMP21:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3]], %struct..kmp_privates.t.3* [[TMP15]], i32 0, i32 2
762 // CHECK-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP21]], i32 0, i32 0
763 // CHECK-NEXT:    [[TMP22:%.*]] = bitcast [2 x %struct.S.0]* [[TMP1]] to %struct.S.0*
764 // CHECK-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
765 // CHECK-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN]], [[TMP23]]
766 // CHECK-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE1:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
767 // CHECK:       omp.arraycpy.body:
768 // CHECK-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP22]], [[OMP_IF_THEN]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
769 // CHECK-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[OMP_IF_THEN]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
770 // CHECK-NEXT:    call void @_ZN1SIiEC1ERKS0_i(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 noundef 0)
771 // CHECK-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
772 // CHECK-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
773 // CHECK-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP23]]
774 // CHECK-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE1]], label [[OMP_ARRAYCPY_BODY]]
775 // CHECK:       omp.arraycpy.done1:
776 // CHECK-NEXT:    [[TMP24:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3]], %struct..kmp_privates.t.3* [[TMP15]], i32 0, i32 3
777 // CHECK-NEXT:    call void @_ZN1SIiEC1ERKS0_i(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP24]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP2]], i32 noundef 0)
778 // CHECK-NEXT:    [[TMP25:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 3
779 // CHECK-NEXT:    [[TMP26:%.*]] = bitcast %union.kmp_cmplrdata_t* [[TMP25]] to i32 (i32, i8*)**
780 // CHECK-NEXT:    store i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates.2*)* @.omp_task_destructor..7 to i32 (i32, i8*)*), i32 (i32, i8*)** [[TMP26]], align 8
781 // CHECK-NEXT:    [[TMP27:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 5
782 // CHECK-NEXT:    store i64 0, i64* [[TMP27]], align 8
783 // CHECK-NEXT:    [[TMP28:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 6
784 // CHECK-NEXT:    store i64 9, i64* [[TMP28]], align 16
785 // CHECK-NEXT:    [[TMP29:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 7
786 // CHECK-NEXT:    store i64 1, i64* [[TMP29]], align 8
787 // CHECK-NEXT:    [[TMP30:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP11]], i32 0, i32 9
788 // CHECK-NEXT:    [[TMP31:%.*]] = bitcast i8** [[TMP30]] to i8*
789 // CHECK-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP31]], i8 0, i64 8, i1 false)
790 // CHECK-NEXT:    [[TMP32:%.*]] = load i64, i64* [[TMP29]], align 8
791 // CHECK-NEXT:    call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]], i8* [[TMP9]], i32 1, i64* [[TMP27]], i64* [[TMP28]], i64 [[TMP32]], i32 1, i32 0, i64 0, i8* bitcast (void (%struct.kmp_task_t_with_privates.2*, %struct.kmp_task_t_with_privates.2*, i32)* @.omp_task_dup..6 to i8*))
792 // CHECK-NEXT:    call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
793 // CHECK-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP4]])
794 // CHECK-NEXT:    br label [[OMP_IF_END]]
795 // CHECK:       omp_if.end:
796 // CHECK-NEXT:    ret void
797 //
798 //
799 // CHECK-LABEL: define {{[^@]+}}@.omp_task_privates_map..4
800 // CHECK-SAME: (%struct..kmp_privates.t.3* noalias noundef [[TMP0:%.*]], i32** noalias noundef [[TMP1:%.*]], [2 x i32]** noalias noundef [[TMP2:%.*]], [2 x %struct.S.0]** noalias noundef [[TMP3:%.*]], %struct.S.0** noalias noundef [[TMP4:%.*]]) #[[ATTR6]] {
801 // CHECK-NEXT:  entry:
802 // CHECK-NEXT:    [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t.3*, align 8
803 // CHECK-NEXT:    [[DOTADDR1:%.*]] = alloca i32**, align 8
804 // CHECK-NEXT:    [[DOTADDR2:%.*]] = alloca [2 x i32]**, align 8
805 // CHECK-NEXT:    [[DOTADDR3:%.*]] = alloca [2 x %struct.S.0]**, align 8
806 // CHECK-NEXT:    [[DOTADDR4:%.*]] = alloca %struct.S.0**, align 8
807 // CHECK-NEXT:    store %struct..kmp_privates.t.3* [[TMP0]], %struct..kmp_privates.t.3** [[DOTADDR]], align 8
808 // CHECK-NEXT:    store i32** [[TMP1]], i32*** [[DOTADDR1]], align 8
809 // CHECK-NEXT:    store [2 x i32]** [[TMP2]], [2 x i32]*** [[DOTADDR2]], align 8
810 // CHECK-NEXT:    store [2 x %struct.S.0]** [[TMP3]], [2 x %struct.S.0]*** [[DOTADDR3]], align 8
811 // CHECK-NEXT:    store %struct.S.0** [[TMP4]], %struct.S.0*** [[DOTADDR4]], align 8
812 // CHECK-NEXT:    [[TMP5:%.*]] = load %struct..kmp_privates.t.3*, %struct..kmp_privates.t.3** [[DOTADDR]], align 8
813 // CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3:%.*]], %struct..kmp_privates.t.3* [[TMP5]], i32 0, i32 0
814 // CHECK-NEXT:    [[TMP7:%.*]] = load i32**, i32*** [[DOTADDR1]], align 8
815 // CHECK-NEXT:    store i32* [[TMP6]], i32** [[TMP7]], align 8
816 // CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3]], %struct..kmp_privates.t.3* [[TMP5]], i32 0, i32 1
817 // CHECK-NEXT:    [[TMP9:%.*]] = load [2 x i32]**, [2 x i32]*** [[DOTADDR2]], align 8
818 // CHECK-NEXT:    store [2 x i32]* [[TMP8]], [2 x i32]** [[TMP9]], align 8
819 // CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3]], %struct..kmp_privates.t.3* [[TMP5]], i32 0, i32 2
820 // CHECK-NEXT:    [[TMP11:%.*]] = load [2 x %struct.S.0]**, [2 x %struct.S.0]*** [[DOTADDR3]], align 8
821 // CHECK-NEXT:    store [2 x %struct.S.0]* [[TMP10]], [2 x %struct.S.0]** [[TMP11]], align 8
822 // CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3]], %struct..kmp_privates.t.3* [[TMP5]], i32 0, i32 3
823 // CHECK-NEXT:    [[TMP13:%.*]] = load %struct.S.0**, %struct.S.0*** [[DOTADDR4]], align 8
824 // CHECK-NEXT:    store %struct.S.0* [[TMP12]], %struct.S.0** [[TMP13]], align 8
825 // CHECK-NEXT:    ret void
826 //
827 //
828 // CHECK-LABEL: define {{[^@]+}}@.omp_task_entry..5
829 // CHECK-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates.2* noalias noundef [[TMP1:%.*]]) #[[ATTR7]] {
830 // CHECK-NEXT:  entry:
831 // CHECK-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
832 // CHECK-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
833 // CHECK-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
834 // CHECK-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
835 // CHECK-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
836 // CHECK-NEXT:    [[DOTLB__ADDR_I:%.*]] = alloca i64, align 8
837 // CHECK-NEXT:    [[DOTUB__ADDR_I:%.*]] = alloca i64, align 8
838 // CHECK-NEXT:    [[DOTST__ADDR_I:%.*]] = alloca i64, align 8
839 // CHECK-NEXT:    [[DOTLITER__ADDR_I:%.*]] = alloca i32, align 4
840 // CHECK-NEXT:    [[DOTREDUCTIONS__ADDR_I:%.*]] = alloca i8*, align 8
841 // CHECK-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon.1*, align 8
842 // CHECK-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca i32*, align 8
843 // CHECK-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca [2 x i32]*, align 8
844 // CHECK-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR2_I:%.*]] = alloca [2 x %struct.S.0]*, align 8
845 // CHECK-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR3_I:%.*]] = alloca %struct.S.0*, align 8
846 // CHECK-NEXT:    [[I_I:%.*]] = alloca i32, align 4
847 // CHECK-NEXT:    [[DOTOMP_IV_I:%.*]] = alloca i32, align 4
848 // CHECK-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
849 // CHECK-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates.2*, align 8
850 // CHECK-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
851 // CHECK-NEXT:    store %struct.kmp_task_t_with_privates.2* [[TMP1]], %struct.kmp_task_t_with_privates.2** [[DOTADDR1]], align 8
852 // CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
853 // CHECK-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates.2*, %struct.kmp_task_t_with_privates.2** [[DOTADDR1]], align 8
854 // CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2:%.*]], %struct.kmp_task_t_with_privates.2* [[TMP3]], i32 0, i32 0
855 // CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
856 // CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
857 // CHECK-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 128
858 // CHECK-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.1*
859 // CHECK-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2]], %struct.kmp_task_t_with_privates.2* [[TMP3]], i32 0, i32 2
860 // CHECK-NEXT:    [[TMP10:%.*]] = bitcast %struct..kmp_privates.t.3* [[TMP9]] to i8*
861 // CHECK-NEXT:    [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates.2* [[TMP3]] to i8*
862 // CHECK-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 5
863 // CHECK-NEXT:    [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8
864 // CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 6
865 // CHECK-NEXT:    [[TMP15:%.*]] = load i64, i64* [[TMP14]], align 16
866 // CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 7
867 // CHECK-NEXT:    [[TMP17:%.*]] = load i64, i64* [[TMP16]], align 8
868 // CHECK-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 8
869 // CHECK-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 64
870 // CHECK-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 9
871 // CHECK-NEXT:    [[TMP21:%.*]] = load i8*, i8** [[TMP20]], align 8
872 // CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META21:![0-9]+]])
873 // CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META24:![0-9]+]])
874 // CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META26:![0-9]+]])
875 // CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META28:![0-9]+]])
876 // CHECK-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META30:![0-9]+]])
877 // CHECK-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !32
878 // CHECK-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !32
879 // CHECK-NEXT:    store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !32
880 // CHECK-NEXT:    store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t.3*, i32**, [2 x i32]**, [2 x %struct.S.0]**, %struct.S.0**)* @.omp_task_privates_map..4 to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !32
881 // CHECK-NEXT:    store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !32
882 // CHECK-NEXT:    store i64 [[TMP13]], i64* [[DOTLB__ADDR_I]], align 8, !noalias !32
883 // CHECK-NEXT:    store i64 [[TMP15]], i64* [[DOTUB__ADDR_I]], align 8, !noalias !32
884 // CHECK-NEXT:    store i64 [[TMP17]], i64* [[DOTST__ADDR_I]], align 8, !noalias !32
885 // CHECK-NEXT:    store i32 [[TMP19]], i32* [[DOTLITER__ADDR_I]], align 4, !noalias !32
886 // CHECK-NEXT:    store i8* [[TMP21]], i8** [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !32
887 // CHECK-NEXT:    store %struct.anon.1* [[TMP8]], %struct.anon.1** [[__CONTEXT_ADDR_I]], align 8, !noalias !32
888 // CHECK-NEXT:    [[TMP22:%.*]] = load %struct.anon.1*, %struct.anon.1** [[__CONTEXT_ADDR_I]], align 8, !noalias !32
889 // CHECK-NEXT:    [[TMP23:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !32
890 // CHECK-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !32
891 // CHECK-NEXT:    [[TMP25:%.*]] = bitcast void (i8*, ...)* [[TMP23]] to void (i8*, i32**, [2 x i32]**, [2 x %struct.S.0]**, %struct.S.0**)*
892 // CHECK-NEXT:    call void [[TMP25]](i8* [[TMP24]], i32** [[DOTFIRSTPRIV_PTR_ADDR_I]], [2 x i32]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], [2 x %struct.S.0]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], %struct.S.0** [[DOTFIRSTPRIV_PTR_ADDR3_I]]) #[[ATTR4]]
893 // CHECK-NEXT:    [[TMP26:%.*]] = load i32*, i32** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !32
894 // CHECK-NEXT:    [[TMP27:%.*]] = load [2 x i32]*, [2 x i32]** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !32
895 // CHECK-NEXT:    [[TMP28:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[DOTFIRSTPRIV_PTR_ADDR2_I]], align 8, !noalias !32
896 // CHECK-NEXT:    [[TMP29:%.*]] = load %struct.S.0*, %struct.S.0** [[DOTFIRSTPRIV_PTR_ADDR3_I]], align 8, !noalias !32
897 // CHECK-NEXT:    [[TMP30:%.*]] = load i64, i64* [[DOTLB__ADDR_I]], align 8, !noalias !32
898 // CHECK-NEXT:    [[CONV_I:%.*]] = trunc i64 [[TMP30]] to i32
899 // CHECK-NEXT:    store i32 [[CONV_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !32
900 // CHECK-NEXT:    br label [[OMP_INNER_FOR_COND_I:%.*]]
901 // CHECK:       omp.inner.for.cond.i:
902 // CHECK-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33:![0-9]+]]
903 // CHECK-NEXT:    [[CONV4_I:%.*]] = sext i32 [[TMP31]] to i64
904 // CHECK-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTUB__ADDR_I]], align 8, !noalias !32, !llvm.access.group [[ACC_GRP33]]
905 // CHECK-NEXT:    [[CMP_I:%.*]] = icmp ule i64 [[CONV4_I]], [[TMP32]]
906 // CHECK-NEXT:    br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__3_EXIT:%.*]]
907 // CHECK:       omp.inner.for.body.i:
908 // CHECK-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33]]
909 // CHECK-NEXT:    store i32 [[TMP33]], i32* [[I_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33]]
910 // CHECK-NEXT:    [[TMP34:%.*]] = load i32, i32* [[TMP26]], align 128, !llvm.access.group [[ACC_GRP33]]
911 // CHECK-NEXT:    [[ARRAYIDX_I:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[TMP27]], i64 0, i64 0
912 // CHECK-NEXT:    store i32 [[TMP34]], i32* [[ARRAYIDX_I]], align 4, !llvm.access.group [[ACC_GRP33]]
913 // CHECK-NEXT:    [[ARRAYIDX5_I:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP28]], i64 0, i64 0
914 // CHECK-NEXT:    [[TMP35:%.*]] = bitcast %struct.S.0* [[ARRAYIDX5_I]] to i8*
915 // CHECK-NEXT:    [[TMP36:%.*]] = bitcast %struct.S.0* [[TMP29]] to i8*
916 // CHECK-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP35]], i8* align 4 [[TMP36]], i64 4, i1 false), !llvm.access.group [[ACC_GRP33]]
917 // CHECK-NEXT:    [[TMP37:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33]]
918 // CHECK-NEXT:    [[ADD6_I:%.*]] = add nsw i32 [[TMP37]], 1
919 // CHECK-NEXT:    store i32 [[ADD6_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !32, !llvm.access.group [[ACC_GRP33]]
920 // CHECK-NEXT:    br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP34:![0-9]+]]
921 // CHECK:       .omp_outlined..3.exit:
922 // CHECK-NEXT:    ret i32 0
923 //
924 //
925 // CHECK-LABEL: define {{[^@]+}}@.omp_task_dup..6
926 // CHECK-SAME: (%struct.kmp_task_t_with_privates.2* noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates.2* noundef [[TMP1:%.*]], i32 noundef [[TMP2:%.*]]) #[[ATTR7]] {
927 // CHECK-NEXT:  entry:
928 // CHECK-NEXT:    [[DOTADDR:%.*]] = alloca %struct.kmp_task_t_with_privates.2*, align 8
929 // CHECK-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates.2*, align 8
930 // CHECK-NEXT:    [[DOTADDR2:%.*]] = alloca i32, align 4
931 // CHECK-NEXT:    store %struct.kmp_task_t_with_privates.2* [[TMP0]], %struct.kmp_task_t_with_privates.2** [[DOTADDR]], align 8
932 // CHECK-NEXT:    store %struct.kmp_task_t_with_privates.2* [[TMP1]], %struct.kmp_task_t_with_privates.2** [[DOTADDR1]], align 8
933 // CHECK-NEXT:    store i32 [[TMP2]], i32* [[DOTADDR2]], align 4
934 // CHECK-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates.2*, %struct.kmp_task_t_with_privates.2** [[DOTADDR]], align 8
935 // CHECK-NEXT:    [[TMP4:%.*]] = load %struct.kmp_task_t_with_privates.2*, %struct.kmp_task_t_with_privates.2** [[DOTADDR1]], align 8
936 // CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2:%.*]], %struct.kmp_task_t_with_privates.2* [[TMP4]], i32 0, i32 0
937 // CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP5]], i32 0, i32 0
938 // CHECK-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 128
939 // CHECK-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2]], %struct.kmp_task_t_with_privates.2* [[TMP3]], i32 0, i32 2
940 // CHECK-NEXT:    [[TMP9:%.*]] = bitcast i8* [[TMP7]] to %struct.anon.1*
941 // CHECK-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3:%.*]], %struct..kmp_privates.t.3* [[TMP8]], i32 0, i32 2
942 // CHECK-NEXT:    [[TMP11:%.*]] = getelementptr inbounds [[STRUCT_ANON_1:%.*]], %struct.anon.1* [[TMP9]], i32 0, i32 0
943 // CHECK-NEXT:    [[TMP12:%.*]] = load [2 x %struct.S.0]*, [2 x %struct.S.0]** [[TMP11]], align 8
944 // CHECK-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP10]], i32 0, i32 0
945 // CHECK-NEXT:    [[TMP13:%.*]] = bitcast [2 x %struct.S.0]* [[TMP12]] to %struct.S.0*
946 // CHECK-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
947 // CHECK-NEXT:    [[OMP_ARRAYCPY_ISEMPTY:%.*]] = icmp eq %struct.S.0* [[ARRAY_BEGIN]], [[TMP14]]
948 // CHECK-NEXT:    br i1 [[OMP_ARRAYCPY_ISEMPTY]], label [[OMP_ARRAYCPY_DONE3:%.*]], label [[OMP_ARRAYCPY_BODY:%.*]]
949 // CHECK:       omp.arraycpy.body:
950 // CHECK-NEXT:    [[OMP_ARRAYCPY_SRCELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP13]], [[ENTRY:%.*]] ], [ [[OMP_ARRAYCPY_SRC_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
951 // CHECK-NEXT:    [[OMP_ARRAYCPY_DESTELEMENTPAST:%.*]] = phi %struct.S.0* [ [[ARRAY_BEGIN]], [[ENTRY]] ], [ [[OMP_ARRAYCPY_DEST_ELEMENT:%.*]], [[OMP_ARRAYCPY_BODY]] ]
952 // CHECK-NEXT:    call void @_ZN1SIiEC1ERKS0_i(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_DESTELEMENTPAST]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 noundef 0)
953 // CHECK-NEXT:    [[OMP_ARRAYCPY_DEST_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_DESTELEMENTPAST]], i32 1
954 // CHECK-NEXT:    [[OMP_ARRAYCPY_SRC_ELEMENT]] = getelementptr [[STRUCT_S_0]], %struct.S.0* [[OMP_ARRAYCPY_SRCELEMENTPAST]], i32 1
955 // CHECK-NEXT:    [[OMP_ARRAYCPY_DONE:%.*]] = icmp eq %struct.S.0* [[OMP_ARRAYCPY_DEST_ELEMENT]], [[TMP14]]
956 // CHECK-NEXT:    br i1 [[OMP_ARRAYCPY_DONE]], label [[OMP_ARRAYCPY_DONE3]], label [[OMP_ARRAYCPY_BODY]]
957 // CHECK:       omp.arraycpy.done3:
958 // CHECK-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3]], %struct..kmp_privates.t.3* [[TMP8]], i32 0, i32 3
959 // CHECK-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_ANON_1]], %struct.anon.1* [[TMP9]], i32 0, i32 1
960 // CHECK-NEXT:    [[TMP17:%.*]] = load %struct.S.0*, %struct.S.0** [[TMP16]], align 8
961 // CHECK-NEXT:    call void @_ZN1SIiEC1ERKS0_i(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP15]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP17]], i32 noundef 0)
962 // CHECK-NEXT:    ret void
963 //
964 //
965 // CHECK-LABEL: define {{[^@]+}}@.omp_task_destructor..7
966 // CHECK-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates.2* noalias noundef [[TMP1:%.*]]) #[[ATTR7]] {
967 // CHECK-NEXT:  entry:
968 // CHECK-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
969 // CHECK-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
970 // CHECK-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates.2*, align 8
971 // CHECK-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
972 // CHECK-NEXT:    store %struct.kmp_task_t_with_privates.2* [[TMP1]], %struct.kmp_task_t_with_privates.2** [[DOTADDR1]], align 8
973 // CHECK-NEXT:    [[TMP2:%.*]] = load %struct.kmp_task_t_with_privates.2*, %struct.kmp_task_t_with_privates.2** [[DOTADDR1]], align 8
974 // CHECK-NEXT:    [[TMP3:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES_2:%.*]], %struct.kmp_task_t_with_privates.2* [[TMP2]], i32 0, i32 2
975 // CHECK-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3:%.*]], %struct..kmp_privates.t.3* [[TMP3]], i32 0, i32 2
976 // CHECK-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T_3]], %struct..kmp_privates.t.3* [[TMP3]], i32 0, i32 3
977 // CHECK-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP5]]) #[[ATTR4]]
978 // CHECK-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[TMP4]], i32 0, i32 0
979 // CHECK-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
980 // CHECK-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
981 // CHECK:       arraydestroy.body:
982 // CHECK-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP6]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
983 // CHECK-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
984 // CHECK-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
985 // CHECK-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
986 // CHECK-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE2:%.*]], label [[ARRAYDESTROY_BODY]]
987 // CHECK:       arraydestroy.done2:
988 // CHECK-NEXT:    [[TMP7:%.*]] = load i32, i32* [[RETVAL]], align 4
989 // CHECK-NEXT:    ret i32 [[TMP7]]
990 //
991 //
992 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
993 // CHECK-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
994 // CHECK-NEXT:  entry:
995 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
996 // CHECK-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
997 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
998 // CHECK-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
999 // CHECK-NEXT:    ret void
1000 //
1001 //
1002 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
1003 // CHECK-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1004 // CHECK-NEXT:  entry:
1005 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1006 // CHECK-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1007 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1008 // CHECK-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1009 // CHECK-NEXT:    store i32 0, i32* [[F]], align 4
1010 // CHECK-NEXT:    ret void
1011 //
1012 //
1013 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIiEC2ERKS0_i
1014 // CHECK-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], i32 noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1015 // CHECK-NEXT:  entry:
1016 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1017 // CHECK-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8
1018 // CHECK-NEXT:    [[T_ADDR:%.*]] = alloca i32, align 4
1019 // CHECK-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1020 // CHECK-NEXT:    store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8
1021 // CHECK-NEXT:    store i32 [[T]], i32* [[T_ADDR]], align 4
1022 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1023 // CHECK-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1024 // CHECK-NEXT:    [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8
1025 // CHECK-NEXT:    [[F2:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[TMP0]], i32 0, i32 0
1026 // CHECK-NEXT:    [[TMP1:%.*]] = load i32, i32* [[F2]], align 4
1027 // CHECK-NEXT:    [[TMP2:%.*]] = load i32, i32* [[T_ADDR]], align 4
1028 // CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP2]]
1029 // CHECK-NEXT:    store i32 [[ADD]], i32* [[F]], align 4
1030 // CHECK-NEXT:    ret void
1031 //
1032 //
1033 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
1034 // CHECK-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1035 // CHECK-NEXT:  entry:
1036 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1037 // CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1038 // CHECK-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1039 // CHECK-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1040 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1041 // CHECK-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1042 // CHECK-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
1043 // CHECK-NEXT:    store i32 [[TMP0]], i32* [[F]], align 4
1044 // CHECK-NEXT:    ret void
1045 //
1046 //
1047 // CHECK-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
1048 // CHECK-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1049 // CHECK-NEXT:  entry:
1050 // CHECK-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1051 // CHECK-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1052 // CHECK-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1053 // CHECK-NEXT:    ret void
1054 //
1055 //
1056 // LAMBDA-LABEL: define {{[^@]+}}@main
1057 // LAMBDA-SAME: () #[[ATTR0:[0-9]+]] {
1058 // LAMBDA-NEXT:  entry:
1059 // LAMBDA-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1060 // LAMBDA-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
1061 // LAMBDA-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1062 // LAMBDA-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 1 dereferenceable(1) [[REF_TMP]])
1063 // LAMBDA-NEXT:    ret i32 0
1064 //
1065 //
1066 // LAMBDA-LABEL: define {{[^@]+}}@.omp_outlined.
1067 // LAMBDA-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR2:[0-9]+]] {
1068 // LAMBDA-NEXT:  entry:
1069 // LAMBDA-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1070 // LAMBDA-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1071 // LAMBDA-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
1072 // LAMBDA-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1073 // LAMBDA-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1074 // LAMBDA-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1075 // LAMBDA-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1076 // LAMBDA-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1077 // LAMBDA-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP1]])
1078 // LAMBDA-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
1079 // LAMBDA-NEXT:    br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1080 // LAMBDA:       omp_if.then:
1081 // LAMBDA-NEXT:    call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1082 // LAMBDA-NEXT:    [[TMP4:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i64 96, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
1083 // LAMBDA-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to %struct.kmp_task_t_with_privates*
1084 // LAMBDA-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP5]], i32 0, i32 0
1085 // LAMBDA-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP5]], i32 0, i32 1
1086 // LAMBDA-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP7]], i32 0, i32 0
1087 // LAMBDA-NEXT:    [[TMP9:%.*]] = load volatile double, double* @g, align 8
1088 // LAMBDA-NEXT:    store volatile double [[TMP9]], double* [[TMP8]], align 8
1089 // LAMBDA-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP7]], i32 0, i32 1
1090 // LAMBDA-NEXT:    [[TMP11:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
1091 // LAMBDA-NEXT:    store i32 [[TMP11]], i32* [[TMP10]], align 8
1092 // LAMBDA-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP6]], i32 0, i32 5
1093 // LAMBDA-NEXT:    store i64 0, i64* [[TMP12]], align 8
1094 // LAMBDA-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP6]], i32 0, i32 6
1095 // LAMBDA-NEXT:    store i64 9, i64* [[TMP13]], align 8
1096 // LAMBDA-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP6]], i32 0, i32 7
1097 // LAMBDA-NEXT:    store i64 1, i64* [[TMP14]], align 8
1098 // LAMBDA-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP6]], i32 0, i32 9
1099 // LAMBDA-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i8*
1100 // LAMBDA-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP16]], i8 0, i64 8, i1 false)
1101 // LAMBDA-NEXT:    [[TMP17:%.*]] = load i64, i64* [[TMP14]], align 8
1102 // LAMBDA-NEXT:    call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* [[TMP4]], i32 1, i64* [[TMP12]], i64* [[TMP13]], i64 [[TMP17]], i32 1, i32 0, i64 0, i8* null)
1103 // LAMBDA-NEXT:    call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1104 // LAMBDA-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1105 // LAMBDA-NEXT:    br label [[OMP_IF_END]]
1106 // LAMBDA:       omp_if.end:
1107 // LAMBDA-NEXT:    ret void
1108 //
1109 //
1110 // LAMBDA-LABEL: define {{[^@]+}}@.omp_task_privates_map.
1111 // LAMBDA-SAME: (%struct..kmp_privates.t* noalias noundef [[TMP0:%.*]], double** noalias noundef [[TMP1:%.*]], i32** noalias noundef [[TMP2:%.*]]) #[[ATTR5:[0-9]+]] {
1112 // LAMBDA-NEXT:  entry:
1113 // LAMBDA-NEXT:    [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8
1114 // LAMBDA-NEXT:    [[DOTADDR1:%.*]] = alloca double**, align 8
1115 // LAMBDA-NEXT:    [[DOTADDR2:%.*]] = alloca i32**, align 8
1116 // LAMBDA-NEXT:    store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8
1117 // LAMBDA-NEXT:    store double** [[TMP1]], double*** [[DOTADDR1]], align 8
1118 // LAMBDA-NEXT:    store i32** [[TMP2]], i32*** [[DOTADDR2]], align 8
1119 // LAMBDA-NEXT:    [[TMP3:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8
1120 // LAMBDA-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 0
1121 // LAMBDA-NEXT:    [[TMP5:%.*]] = load double**, double*** [[DOTADDR1]], align 8
1122 // LAMBDA-NEXT:    store double* [[TMP4]], double** [[TMP5]], align 8
1123 // LAMBDA-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 1
1124 // LAMBDA-NEXT:    [[TMP7:%.*]] = load i32**, i32*** [[DOTADDR2]], align 8
1125 // LAMBDA-NEXT:    store i32* [[TMP6]], i32** [[TMP7]], align 8
1126 // LAMBDA-NEXT:    ret void
1127 //
1128 //
1129 // LAMBDA-LABEL: define {{[^@]+}}@.omp_task_entry.
1130 // LAMBDA-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR6:[0-9]+]] {
1131 // LAMBDA-NEXT:  entry:
1132 // LAMBDA-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
1133 // LAMBDA-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
1134 // LAMBDA-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
1135 // LAMBDA-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
1136 // LAMBDA-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
1137 // LAMBDA-NEXT:    [[DOTLB__ADDR_I:%.*]] = alloca i64, align 8
1138 // LAMBDA-NEXT:    [[DOTUB__ADDR_I:%.*]] = alloca i64, align 8
1139 // LAMBDA-NEXT:    [[DOTST__ADDR_I:%.*]] = alloca i64, align 8
1140 // LAMBDA-NEXT:    [[DOTLITER__ADDR_I:%.*]] = alloca i32, align 4
1141 // LAMBDA-NEXT:    [[DOTREDUCTIONS__ADDR_I:%.*]] = alloca i8*, align 8
1142 // LAMBDA-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
1143 // LAMBDA-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca double*, align 8
1144 // LAMBDA-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca i32*, align 8
1145 // LAMBDA-NEXT:    [[I_I:%.*]] = alloca i32, align 4
1146 // LAMBDA-NEXT:    [[DOTOMP_IV_I:%.*]] = alloca i32, align 4
1147 // LAMBDA-NEXT:    [[REF_TMP_I:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
1148 // LAMBDA-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
1149 // LAMBDA-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
1150 // LAMBDA-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
1151 // LAMBDA-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
1152 // LAMBDA-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
1153 // LAMBDA-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
1154 // LAMBDA-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
1155 // LAMBDA-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
1156 // LAMBDA-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
1157 // LAMBDA-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
1158 // LAMBDA-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
1159 // LAMBDA-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
1160 // LAMBDA-NEXT:    [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
1161 // LAMBDA-NEXT:    [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
1162 // LAMBDA-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 5
1163 // LAMBDA-NEXT:    [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8
1164 // LAMBDA-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 6
1165 // LAMBDA-NEXT:    [[TMP15:%.*]] = load i64, i64* [[TMP14]], align 8
1166 // LAMBDA-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 7
1167 // LAMBDA-NEXT:    [[TMP17:%.*]] = load i64, i64* [[TMP16]], align 8
1168 // LAMBDA-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 8
1169 // LAMBDA-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 8
1170 // LAMBDA-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 9
1171 // LAMBDA-NEXT:    [[TMP21:%.*]] = load i8*, i8** [[TMP20]], align 8
1172 // LAMBDA-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
1173 // LAMBDA-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
1174 // LAMBDA-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]])
1175 // LAMBDA-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]])
1176 // LAMBDA-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]])
1177 // LAMBDA-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14
1178 // LAMBDA-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !14
1179 // LAMBDA-NEXT:    store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
1180 // LAMBDA-NEXT:    store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, double**, i32**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
1181 // LAMBDA-NEXT:    store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !14
1182 // LAMBDA-NEXT:    store i64 [[TMP13]], i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
1183 // LAMBDA-NEXT:    store i64 [[TMP15]], i64* [[DOTUB__ADDR_I]], align 8, !noalias !14
1184 // LAMBDA-NEXT:    store i64 [[TMP17]], i64* [[DOTST__ADDR_I]], align 8, !noalias !14
1185 // LAMBDA-NEXT:    store i32 [[TMP19]], i32* [[DOTLITER__ADDR_I]], align 4, !noalias !14
1186 // LAMBDA-NEXT:    store i8* [[TMP21]], i8** [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14
1187 // LAMBDA-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
1188 // LAMBDA-NEXT:    [[TMP22:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
1189 // LAMBDA-NEXT:    [[TMP23:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
1190 // LAMBDA-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
1191 // LAMBDA-NEXT:    [[TMP25:%.*]] = bitcast void (i8*, ...)* [[TMP23]] to void (i8*, double**, i32**)*
1192 // LAMBDA-NEXT:    call void [[TMP25]](i8* [[TMP24]], double** [[DOTFIRSTPRIV_PTR_ADDR_I]], i32** [[DOTFIRSTPRIV_PTR_ADDR1_I]]) #[[ATTR3:[0-9]+]]
1193 // LAMBDA-NEXT:    [[TMP26:%.*]] = load double*, double** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !14
1194 // LAMBDA-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !14
1195 // LAMBDA-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
1196 // LAMBDA-NEXT:    [[CONV_I:%.*]] = trunc i64 [[TMP28]] to i32
1197 // LAMBDA-NEXT:    store i32 [[CONV_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14
1198 // LAMBDA-NEXT:    br label [[OMP_INNER_FOR_COND_I:%.*]]
1199 // LAMBDA:       omp.inner.for.cond.i:
1200 // LAMBDA-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15:![0-9]+]]
1201 // LAMBDA-NEXT:    [[CONV2_I:%.*]] = sext i32 [[TMP29]] to i64
1202 // LAMBDA-NEXT:    [[TMP30:%.*]] = load i64, i64* [[DOTUB__ADDR_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1203 // LAMBDA-NEXT:    [[CMP_I:%.*]] = icmp ule i64 [[CONV2_I]], [[TMP30]]
1204 // LAMBDA-NEXT:    br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
1205 // LAMBDA:       omp.inner.for.body.i:
1206 // LAMBDA-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1207 // LAMBDA-NEXT:    store i32 [[TMP31]], i32* [[I_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1208 // LAMBDA-NEXT:    store double 1.000000e+00, double* [[TMP26]], align 8, !llvm.access.group [[ACC_GRP15]]
1209 // LAMBDA-NEXT:    store i32 11, i32* [[TMP27]], align 4, !llvm.access.group [[ACC_GRP15]]
1210 // LAMBDA-NEXT:    [[TMP32:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP_I]], i32 0, i32 0
1211 // LAMBDA-NEXT:    store double* [[TMP26]], double** [[TMP32]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1212 // LAMBDA-NEXT:    [[TMP33:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP_I]], i32 0, i32 1
1213 // LAMBDA-NEXT:    store i32* [[TMP27]], i32** [[TMP33]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1214 // LAMBDA-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* noundef nonnull align 8 dereferenceable(16) [[REF_TMP_I]]), !llvm.access.group [[ACC_GRP15]]
1215 // LAMBDA-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1216 // LAMBDA-NEXT:    [[ADD3_I:%.*]] = add nsw i32 [[TMP34]], 1
1217 // LAMBDA-NEXT:    store i32 [[ADD3_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1218 // LAMBDA-NEXT:    br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP16:![0-9]+]]
1219 // LAMBDA:       .omp_outlined..1.exit:
1220 // LAMBDA-NEXT:    ret i32 0
1221 //
1222 //
1223 // BLOCKS-LABEL: define {{[^@]+}}@main
1224 // BLOCKS-SAME: () #[[ATTR1:[0-9]+]] {
1225 // BLOCKS-NEXT:  entry:
1226 // BLOCKS-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1227 // BLOCKS-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1228 // BLOCKS-NEXT:    [[TMP0:%.*]] = load i8*, i8** getelementptr inbounds ([[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to %struct.__block_literal_generic*), i32 0, i32 3), align 8
1229 // BLOCKS-NEXT:    [[TMP1:%.*]] = bitcast i8* [[TMP0]] to void (i8*)*
1230 // BLOCKS-NEXT:    call void [[TMP1]](i8* noundef bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to i8*))
1231 // BLOCKS-NEXT:    ret i32 0
1232 //
1233 //
1234 // BLOCKS-LABEL: define {{[^@]+}}@__main_block_invoke
1235 // BLOCKS-SAME: (i8* noundef [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR2:[0-9]+]] {
1236 // BLOCKS-NEXT:  entry:
1237 // BLOCKS-NEXT:    [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
1238 // BLOCKS-NEXT:    [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*, align 8
1239 // BLOCKS-NEXT:    store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
1240 // BLOCKS-NEXT:    [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*
1241 // BLOCKS-NEXT:    store <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>** [[BLOCK_ADDR]], align 8
1242 // BLOCKS-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 0, void (i32*, i32*, ...)* bitcast (void (i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*))
1243 // BLOCKS-NEXT:    ret void
1244 //
1245 //
1246 // BLOCKS-LABEL: define {{[^@]+}}@.omp_outlined.
1247 // BLOCKS-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]]) #[[ATTR3:[0-9]+]] {
1248 // BLOCKS-NEXT:  entry:
1249 // BLOCKS-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1250 // BLOCKS-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1251 // BLOCKS-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 1
1252 // BLOCKS-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1253 // BLOCKS-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1254 // BLOCKS-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1255 // BLOCKS-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1256 // BLOCKS-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
1257 // BLOCKS-NEXT:    [[TMP2:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1258 // BLOCKS-NEXT:    [[TMP3:%.*]] = icmp ne i32 [[TMP2]], 0
1259 // BLOCKS-NEXT:    br i1 [[TMP3]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1260 // BLOCKS:       omp_if.then:
1261 // BLOCKS-NEXT:    call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1262 // BLOCKS-NEXT:    [[TMP4:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i32 1, i64 96, i64 1, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
1263 // BLOCKS-NEXT:    [[TMP5:%.*]] = bitcast i8* [[TMP4]] to %struct.kmp_task_t_with_privates*
1264 // BLOCKS-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP5]], i32 0, i32 0
1265 // BLOCKS-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP5]], i32 0, i32 1
1266 // BLOCKS-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP7]], i32 0, i32 0
1267 // BLOCKS-NEXT:    [[TMP9:%.*]] = load volatile double, double* @g, align 8
1268 // BLOCKS-NEXT:    store volatile double [[TMP9]], double* [[TMP8]], align 8
1269 // BLOCKS-NEXT:    [[TMP10:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP7]], i32 0, i32 1
1270 // BLOCKS-NEXT:    [[TMP11:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4
1271 // BLOCKS-NEXT:    store i32 [[TMP11]], i32* [[TMP10]], align 8
1272 // BLOCKS-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP6]], i32 0, i32 5
1273 // BLOCKS-NEXT:    store i64 0, i64* [[TMP12]], align 8
1274 // BLOCKS-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP6]], i32 0, i32 6
1275 // BLOCKS-NEXT:    store i64 9, i64* [[TMP13]], align 8
1276 // BLOCKS-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP6]], i32 0, i32 7
1277 // BLOCKS-NEXT:    store i64 1, i64* [[TMP14]], align 8
1278 // BLOCKS-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP6]], i32 0, i32 9
1279 // BLOCKS-NEXT:    [[TMP16:%.*]] = bitcast i8** [[TMP15]] to i8*
1280 // BLOCKS-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP16]], i8 0, i64 8, i1 false)
1281 // BLOCKS-NEXT:    [[TMP17:%.*]] = load i64, i64* [[TMP14]], align 8
1282 // BLOCKS-NEXT:    call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]], i8* [[TMP4]], i32 1, i64* [[TMP12]], i64* [[TMP13]], i64 [[TMP17]], i32 1, i32 0, i64 0, i8* null)
1283 // BLOCKS-NEXT:    call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1284 // BLOCKS-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP1]])
1285 // BLOCKS-NEXT:    br label [[OMP_IF_END]]
1286 // BLOCKS:       omp_if.end:
1287 // BLOCKS-NEXT:    ret void
1288 //
1289 //
1290 // BLOCKS-LABEL: define {{[^@]+}}@_block_invoke
1291 // BLOCKS-SAME: (i8* noundef [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR2]] {
1292 // BLOCKS-NEXT:  entry:
1293 // BLOCKS-NEXT:    [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
1294 // BLOCKS-NEXT:    [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>*, align 8
1295 // BLOCKS-NEXT:    store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
1296 // BLOCKS-NEXT:    [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>*
1297 // BLOCKS-NEXT:    store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>** [[BLOCK_ADDR]], align 8
1298 // BLOCKS-NEXT:    [[BLOCK_CAPTURE_ADDR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK]], i32 0, i32 5
1299 // BLOCKS-NEXT:    store double 2.000000e+00, double* [[BLOCK_CAPTURE_ADDR]], align 8
1300 // BLOCKS-NEXT:    [[BLOCK_CAPTURE_ADDR1:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK]], i32 0, i32 6
1301 // BLOCKS-NEXT:    store i32 22, i32* [[BLOCK_CAPTURE_ADDR1]], align 8
1302 // BLOCKS-NEXT:    ret void
1303 //
1304 //
1305 // BLOCKS-LABEL: define {{[^@]+}}@.omp_task_privates_map.
1306 // BLOCKS-SAME: (%struct..kmp_privates.t* noalias noundef [[TMP0:%.*]], double** noalias noundef [[TMP1:%.*]], i32** noalias noundef [[TMP2:%.*]]) #[[ATTR6:[0-9]+]] {
1307 // BLOCKS-NEXT:  entry:
1308 // BLOCKS-NEXT:    [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8
1309 // BLOCKS-NEXT:    [[DOTADDR1:%.*]] = alloca double**, align 8
1310 // BLOCKS-NEXT:    [[DOTADDR2:%.*]] = alloca i32**, align 8
1311 // BLOCKS-NEXT:    store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8
1312 // BLOCKS-NEXT:    store double** [[TMP1]], double*** [[DOTADDR1]], align 8
1313 // BLOCKS-NEXT:    store i32** [[TMP2]], i32*** [[DOTADDR2]], align 8
1314 // BLOCKS-NEXT:    [[TMP3:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8
1315 // BLOCKS-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 0
1316 // BLOCKS-NEXT:    [[TMP5:%.*]] = load double**, double*** [[DOTADDR1]], align 8
1317 // BLOCKS-NEXT:    store double* [[TMP4]], double** [[TMP5]], align 8
1318 // BLOCKS-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 1
1319 // BLOCKS-NEXT:    [[TMP7:%.*]] = load i32**, i32*** [[DOTADDR2]], align 8
1320 // BLOCKS-NEXT:    store i32* [[TMP6]], i32** [[TMP7]], align 8
1321 // BLOCKS-NEXT:    ret void
1322 //
1323 //
1324 // BLOCKS-LABEL: define {{[^@]+}}@.omp_task_entry.
1325 // BLOCKS-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR7:[0-9]+]] {
1326 // BLOCKS-NEXT:  entry:
1327 // BLOCKS-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
1328 // BLOCKS-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
1329 // BLOCKS-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
1330 // BLOCKS-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
1331 // BLOCKS-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
1332 // BLOCKS-NEXT:    [[DOTLB__ADDR_I:%.*]] = alloca i64, align 8
1333 // BLOCKS-NEXT:    [[DOTUB__ADDR_I:%.*]] = alloca i64, align 8
1334 // BLOCKS-NEXT:    [[DOTST__ADDR_I:%.*]] = alloca i64, align 8
1335 // BLOCKS-NEXT:    [[DOTLITER__ADDR_I:%.*]] = alloca i32, align 4
1336 // BLOCKS-NEXT:    [[DOTREDUCTIONS__ADDR_I:%.*]] = alloca i8*, align 8
1337 // BLOCKS-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
1338 // BLOCKS-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca double*, align 8
1339 // BLOCKS-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca i32*, align 8
1340 // BLOCKS-NEXT:    [[I_I:%.*]] = alloca i32, align 4
1341 // BLOCKS-NEXT:    [[DOTOMP_IV_I:%.*]] = alloca i32, align 4
1342 // BLOCKS-NEXT:    [[BLOCK_I:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, align 8
1343 // BLOCKS-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
1344 // BLOCKS-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
1345 // BLOCKS-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
1346 // BLOCKS-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
1347 // BLOCKS-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
1348 // BLOCKS-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
1349 // BLOCKS-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
1350 // BLOCKS-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
1351 // BLOCKS-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
1352 // BLOCKS-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
1353 // BLOCKS-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
1354 // BLOCKS-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
1355 // BLOCKS-NEXT:    [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
1356 // BLOCKS-NEXT:    [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
1357 // BLOCKS-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 5
1358 // BLOCKS-NEXT:    [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8
1359 // BLOCKS-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 6
1360 // BLOCKS-NEXT:    [[TMP15:%.*]] = load i64, i64* [[TMP14]], align 8
1361 // BLOCKS-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 7
1362 // BLOCKS-NEXT:    [[TMP17:%.*]] = load i64, i64* [[TMP16]], align 8
1363 // BLOCKS-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 8
1364 // BLOCKS-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 8
1365 // BLOCKS-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 9
1366 // BLOCKS-NEXT:    [[TMP21:%.*]] = load i8*, i8** [[TMP20]], align 8
1367 // BLOCKS-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
1368 // BLOCKS-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
1369 // BLOCKS-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]])
1370 // BLOCKS-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]])
1371 // BLOCKS-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]])
1372 // BLOCKS-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14
1373 // BLOCKS-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !14
1374 // BLOCKS-NEXT:    store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
1375 // BLOCKS-NEXT:    store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, double**, i32**)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
1376 // BLOCKS-NEXT:    store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !14
1377 // BLOCKS-NEXT:    store i64 [[TMP13]], i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
1378 // BLOCKS-NEXT:    store i64 [[TMP15]], i64* [[DOTUB__ADDR_I]], align 8, !noalias !14
1379 // BLOCKS-NEXT:    store i64 [[TMP17]], i64* [[DOTST__ADDR_I]], align 8, !noalias !14
1380 // BLOCKS-NEXT:    store i32 [[TMP19]], i32* [[DOTLITER__ADDR_I]], align 4, !noalias !14
1381 // BLOCKS-NEXT:    store i8* [[TMP21]], i8** [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14
1382 // BLOCKS-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
1383 // BLOCKS-NEXT:    [[TMP22:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
1384 // BLOCKS-NEXT:    [[TMP23:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
1385 // BLOCKS-NEXT:    [[TMP24:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
1386 // BLOCKS-NEXT:    [[TMP25:%.*]] = bitcast void (i8*, ...)* [[TMP23]] to void (i8*, double**, i32**)*
1387 // BLOCKS-NEXT:    call void [[TMP25]](i8* [[TMP24]], double** [[DOTFIRSTPRIV_PTR_ADDR_I]], i32** [[DOTFIRSTPRIV_PTR_ADDR1_I]]) #[[ATTR4:[0-9]+]]
1388 // BLOCKS-NEXT:    [[TMP26:%.*]] = load double*, double** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !14
1389 // BLOCKS-NEXT:    [[TMP27:%.*]] = load i32*, i32** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !14
1390 // BLOCKS-NEXT:    [[TMP28:%.*]] = load i64, i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
1391 // BLOCKS-NEXT:    [[CONV_I:%.*]] = trunc i64 [[TMP28]] to i32
1392 // BLOCKS-NEXT:    store i32 [[CONV_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14
1393 // BLOCKS-NEXT:    br label [[OMP_INNER_FOR_COND_I:%.*]]
1394 // BLOCKS:       omp.inner.for.cond.i:
1395 // BLOCKS-NEXT:    [[TMP29:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15:![0-9]+]]
1396 // BLOCKS-NEXT:    [[CONV2_I:%.*]] = sext i32 [[TMP29]] to i64
1397 // BLOCKS-NEXT:    [[TMP30:%.*]] = load i64, i64* [[DOTUB__ADDR_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1398 // BLOCKS-NEXT:    [[CMP_I:%.*]] = icmp ule i64 [[CONV2_I]], [[TMP30]]
1399 // BLOCKS-NEXT:    br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
1400 // BLOCKS:       omp.inner.for.body.i:
1401 // BLOCKS-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1402 // BLOCKS-NEXT:    store i32 [[TMP31]], i32* [[I_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1403 // BLOCKS-NEXT:    store double 1.000000e+00, double* [[TMP26]], align 8, !llvm.access.group [[ACC_GRP15]]
1404 // BLOCKS-NEXT:    store i32 11, i32* [[TMP27]], align 4, !llvm.access.group [[ACC_GRP15]]
1405 // BLOCKS-NEXT:    [[BLOCK_ISA_I:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK_I]], i32 0, i32 0
1406 // BLOCKS-NEXT:    store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1407 // BLOCKS-NEXT:    [[BLOCK_FLAGS_I:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK_I]], i32 0, i32 1
1408 // BLOCKS-NEXT:    store i32 1073741824, i32* [[BLOCK_FLAGS_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1409 // BLOCKS-NEXT:    [[BLOCK_RESERVED_I:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK_I]], i32 0, i32 2
1410 // BLOCKS-NEXT:    store i32 0, i32* [[BLOCK_RESERVED_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1411 // BLOCKS-NEXT:    [[BLOCK_INVOKE_I:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK_I]], i32 0, i32 3
1412 // BLOCKS-NEXT:    store i8* bitcast (void (i8*)* @_block_invoke to i8*), i8** [[BLOCK_INVOKE_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1413 // BLOCKS-NEXT:    [[BLOCK_DESCRIPTOR_I:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK_I]], i32 0, i32 4
1414 // BLOCKS-NEXT:    store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp.2 to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1415 // BLOCKS-NEXT:    [[BLOCK_CAPTURED_I:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK_I]], i32 0, i32 5
1416 // BLOCKS-NEXT:    [[TMP32:%.*]] = load volatile double, double* [[TMP26]], align 8, !llvm.access.group [[ACC_GRP15]]
1417 // BLOCKS-NEXT:    store volatile double [[TMP32]], double* [[BLOCK_CAPTURED_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1418 // BLOCKS-NEXT:    [[BLOCK_CAPTURED3_I:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK_I]], i32 0, i32 6
1419 // BLOCKS-NEXT:    [[TMP33:%.*]] = load i32, i32* [[TMP27]], align 4, !llvm.access.group [[ACC_GRP15]]
1420 // BLOCKS-NEXT:    store i32 [[TMP33]], i32* [[BLOCK_CAPTURED3_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1421 // BLOCKS-NEXT:    [[TMP34:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK_I]] to void ()*
1422 // BLOCKS-NEXT:    [[BLOCK_LITERAL_I:%.*]] = bitcast void ()* [[TMP34]] to %struct.__block_literal_generic*
1423 // BLOCKS-NEXT:    [[TMP35:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL_I]], i32 0, i32 3
1424 // BLOCKS-NEXT:    [[TMP36:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL_I]] to i8*
1425 // BLOCKS-NEXT:    [[TMP37:%.*]] = load i8*, i8** [[TMP35]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1426 // BLOCKS-NEXT:    [[TMP38:%.*]] = bitcast i8* [[TMP37]] to void (i8*)*
1427 // BLOCKS-NEXT:    call void [[TMP38]](i8* noundef [[TMP36]]) #[[ATTR4]], !llvm.access.group [[ACC_GRP15]]
1428 // BLOCKS-NEXT:    [[TMP39:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1429 // BLOCKS-NEXT:    [[ADD4_I:%.*]] = add nsw i32 [[TMP39]], 1
1430 // BLOCKS-NEXT:    store i32 [[ADD4_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1431 // BLOCKS-NEXT:    br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP16:![0-9]+]]
1432 // BLOCKS:       .omp_outlined..1.exit:
1433 // BLOCKS-NEXT:    ret i32 0
1434 //
1435 //
1436 // ARRAY-LABEL: define {{[^@]+}}@_Z10array_funciPfP2St
1437 // ARRAY-SAME: (i32 noundef [[N:%.*]], float* noundef [[A:%.*]], %struct.St* noundef [[S:%.*]]) #[[ATTR0:[0-9]+]] {
1438 // ARRAY-NEXT:  entry:
1439 // ARRAY-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
1440 // ARRAY-NEXT:    [[A_ADDR:%.*]] = alloca float*, align 8
1441 // ARRAY-NEXT:    [[S_ADDR:%.*]] = alloca %struct.St*, align 8
1442 // ARRAY-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
1443 // ARRAY-NEXT:    store float* [[A]], float** [[A_ADDR]], align 8
1444 // ARRAY-NEXT:    store %struct.St* [[S]], %struct.St** [[S_ADDR]], align 8
1445 // ARRAY-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
1446 // ARRAY-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
1447 // ARRAY-NEXT:    [[TMP2:%.*]] = load float*, float** [[A_ADDR]], align 8
1448 // ARRAY-NEXT:    [[TMP3:%.*]] = load %struct.St*, %struct.St** [[S_ADDR]], align 8
1449 // ARRAY-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 3, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i64, float*, %struct.St*)* @.omp_outlined. to void (i32*, i32*, ...)*), i64 [[TMP1]], float* [[TMP2]], %struct.St* [[TMP3]])
1450 // ARRAY-NEXT:    ret void
1451 //
1452 //
1453 // ARRAY-LABEL: define {{[^@]+}}@.omp_outlined.
1454 // ARRAY-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i64 noundef [[VLA:%.*]], float* noundef [[A:%.*]], %struct.St* noundef [[S:%.*]]) #[[ATTR1:[0-9]+]] {
1455 // ARRAY-NEXT:  entry:
1456 // ARRAY-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
1457 // ARRAY-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
1458 // ARRAY-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
1459 // ARRAY-NEXT:    [[A_ADDR:%.*]] = alloca float*, align 8
1460 // ARRAY-NEXT:    [[S_ADDR:%.*]] = alloca %struct.St*, align 8
1461 // ARRAY-NEXT:    [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
1462 // ARRAY-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1463 // ARRAY-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
1464 // ARRAY-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
1465 // ARRAY-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
1466 // ARRAY-NEXT:    store float* [[A]], float** [[A_ADDR]], align 8
1467 // ARRAY-NEXT:    store %struct.St* [[S]], %struct.St** [[S_ADDR]], align 8
1468 // ARRAY-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
1469 // ARRAY-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
1470 // ARRAY-NEXT:    [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
1471 // ARRAY-NEXT:    [[TMP3:%.*]] = call i32 @__kmpc_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1472 // ARRAY-NEXT:    [[TMP4:%.*]] = icmp ne i32 [[TMP3]], 0
1473 // ARRAY-NEXT:    br i1 [[TMP4]], label [[OMP_IF_THEN:%.*]], label [[OMP_IF_END:%.*]]
1474 // ARRAY:       omp_if.then:
1475 // ARRAY-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
1476 // ARRAY-NEXT:    store i64 [[TMP0]], i64* [[TMP5]], align 8
1477 // ARRAY-NEXT:    call void @__kmpc_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1478 // ARRAY-NEXT:    [[TMP6:%.*]] = call i8* @__kmpc_omp_task_alloc(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i32 1, i64 96, i64 8, i32 (i32, i8*)* bitcast (i32 (i32, %struct.kmp_task_t_with_privates*)* @.omp_task_entry. to i32 (i32, i8*)*))
1479 // ARRAY-NEXT:    [[TMP7:%.*]] = bitcast i8* [[TMP6]] to %struct.kmp_task_t_with_privates*
1480 // ARRAY-NEXT:    [[TMP8:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP7]], i32 0, i32 0
1481 // ARRAY-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP8]], i32 0, i32 0
1482 // ARRAY-NEXT:    [[TMP10:%.*]] = load i8*, i8** [[TMP9]], align 8
1483 // ARRAY-NEXT:    [[TMP11:%.*]] = bitcast %struct.anon* [[AGG_CAPTURED]] to i8*
1484 // ARRAY-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 8 [[TMP10]], i8* align 8 [[TMP11]], i64 8, i1 false)
1485 // ARRAY-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP7]], i32 0, i32 1
1486 // ARRAY-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP12]], i32 0, i32 0
1487 // ARRAY-NEXT:    [[TMP14:%.*]] = load float*, float** [[A_ADDR]], align 8
1488 // ARRAY-NEXT:    store float* [[TMP14]], float** [[TMP13]], align 8
1489 // ARRAY-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP12]], i32 0, i32 1
1490 // ARRAY-NEXT:    [[TMP16:%.*]] = load %struct.St*, %struct.St** [[S_ADDR]], align 8
1491 // ARRAY-NEXT:    store %struct.St* [[TMP16]], %struct.St** [[TMP15]], align 8
1492 // ARRAY-NEXT:    [[TMP17:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP8]], i32 0, i32 5
1493 // ARRAY-NEXT:    store i64 0, i64* [[TMP17]], align 8
1494 // ARRAY-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP8]], i32 0, i32 6
1495 // ARRAY-NEXT:    store i64 9, i64* [[TMP18]], align 8
1496 // ARRAY-NEXT:    [[TMP19:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP8]], i32 0, i32 7
1497 // ARRAY-NEXT:    store i64 1, i64* [[TMP19]], align 8
1498 // ARRAY-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP8]], i32 0, i32 9
1499 // ARRAY-NEXT:    [[TMP21:%.*]] = bitcast i8** [[TMP20]] to i8*
1500 // ARRAY-NEXT:    call void @llvm.memset.p0i8.i64(i8* align 8 [[TMP21]], i8 0, i64 8, i1 false)
1501 // ARRAY-NEXT:    [[TMP22:%.*]] = load i64, i64* [[TMP19]], align 8
1502 // ARRAY-NEXT:    call void @__kmpc_taskloop(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]], i8* [[TMP6]], i32 1, i64* [[TMP17]], i64* [[TMP18]], i64 [[TMP22]], i32 1, i32 0, i64 0, i8* null)
1503 // ARRAY-NEXT:    call void @__kmpc_end_taskgroup(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1504 // ARRAY-NEXT:    call void @__kmpc_end_master(%struct.ident_t* @[[GLOB1]], i32 [[TMP2]])
1505 // ARRAY-NEXT:    br label [[OMP_IF_END]]
1506 // ARRAY:       omp_if.end:
1507 // ARRAY-NEXT:    ret void
1508 //
1509 //
1510 // ARRAY-LABEL: define {{[^@]+}}@.omp_task_privates_map.
1511 // ARRAY-SAME: (%struct..kmp_privates.t* noalias noundef [[TMP0:%.*]], float*** noalias noundef [[TMP1:%.*]], %struct.St*** noalias noundef [[TMP2:%.*]]) #[[ATTR4:[0-9]+]] {
1512 // ARRAY-NEXT:  entry:
1513 // ARRAY-NEXT:    [[DOTADDR:%.*]] = alloca %struct..kmp_privates.t*, align 8
1514 // ARRAY-NEXT:    [[DOTADDR1:%.*]] = alloca float***, align 8
1515 // ARRAY-NEXT:    [[DOTADDR2:%.*]] = alloca %struct.St***, align 8
1516 // ARRAY-NEXT:    store %struct..kmp_privates.t* [[TMP0]], %struct..kmp_privates.t** [[DOTADDR]], align 8
1517 // ARRAY-NEXT:    store float*** [[TMP1]], float**** [[DOTADDR1]], align 8
1518 // ARRAY-NEXT:    store %struct.St*** [[TMP2]], %struct.St**** [[DOTADDR2]], align 8
1519 // ARRAY-NEXT:    [[TMP3:%.*]] = load %struct..kmp_privates.t*, %struct..kmp_privates.t** [[DOTADDR]], align 8
1520 // ARRAY-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T:%.*]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 0
1521 // ARRAY-NEXT:    [[TMP5:%.*]] = load float***, float**** [[DOTADDR1]], align 8
1522 // ARRAY-NEXT:    store float** [[TMP4]], float*** [[TMP5]], align 8
1523 // ARRAY-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT__KMP_PRIVATES_T]], %struct..kmp_privates.t* [[TMP3]], i32 0, i32 1
1524 // ARRAY-NEXT:    [[TMP7:%.*]] = load %struct.St***, %struct.St**** [[DOTADDR2]], align 8
1525 // ARRAY-NEXT:    store %struct.St** [[TMP6]], %struct.St*** [[TMP7]], align 8
1526 // ARRAY-NEXT:    ret void
1527 //
1528 //
1529 // ARRAY-LABEL: define {{[^@]+}}@.omp_task_entry.
1530 // ARRAY-SAME: (i32 noundef [[TMP0:%.*]], %struct.kmp_task_t_with_privates* noalias noundef [[TMP1:%.*]]) #[[ATTR5:[0-9]+]] {
1531 // ARRAY-NEXT:  entry:
1532 // ARRAY-NEXT:    [[DOTGLOBAL_TID__ADDR_I:%.*]] = alloca i32, align 4
1533 // ARRAY-NEXT:    [[DOTPART_ID__ADDR_I:%.*]] = alloca i32*, align 8
1534 // ARRAY-NEXT:    [[DOTPRIVATES__ADDR_I:%.*]] = alloca i8*, align 8
1535 // ARRAY-NEXT:    [[DOTCOPY_FN__ADDR_I:%.*]] = alloca void (i8*, ...)*, align 8
1536 // ARRAY-NEXT:    [[DOTTASK_T__ADDR_I:%.*]] = alloca i8*, align 8
1537 // ARRAY-NEXT:    [[DOTLB__ADDR_I:%.*]] = alloca i64, align 8
1538 // ARRAY-NEXT:    [[DOTUB__ADDR_I:%.*]] = alloca i64, align 8
1539 // ARRAY-NEXT:    [[DOTST__ADDR_I:%.*]] = alloca i64, align 8
1540 // ARRAY-NEXT:    [[DOTLITER__ADDR_I:%.*]] = alloca i32, align 4
1541 // ARRAY-NEXT:    [[DOTREDUCTIONS__ADDR_I:%.*]] = alloca i8*, align 8
1542 // ARRAY-NEXT:    [[__CONTEXT_ADDR_I:%.*]] = alloca %struct.anon*, align 8
1543 // ARRAY-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR_I:%.*]] = alloca float**, align 8
1544 // ARRAY-NEXT:    [[DOTFIRSTPRIV_PTR_ADDR1_I:%.*]] = alloca %struct.St**, align 8
1545 // ARRAY-NEXT:    [[I_I:%.*]] = alloca i32, align 4
1546 // ARRAY-NEXT:    [[DOTOMP_IV_I:%.*]] = alloca i32, align 4
1547 // ARRAY-NEXT:    [[DOTADDR:%.*]] = alloca i32, align 4
1548 // ARRAY-NEXT:    [[DOTADDR1:%.*]] = alloca %struct.kmp_task_t_with_privates*, align 8
1549 // ARRAY-NEXT:    store i32 [[TMP0]], i32* [[DOTADDR]], align 4
1550 // ARRAY-NEXT:    store %struct.kmp_task_t_with_privates* [[TMP1]], %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
1551 // ARRAY-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTADDR]], align 4
1552 // ARRAY-NEXT:    [[TMP3:%.*]] = load %struct.kmp_task_t_with_privates*, %struct.kmp_task_t_with_privates** [[DOTADDR1]], align 8
1553 // ARRAY-NEXT:    [[TMP4:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES:%.*]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 0
1554 // ARRAY-NEXT:    [[TMP5:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T:%.*]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 2
1555 // ARRAY-NEXT:    [[TMP6:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 0
1556 // ARRAY-NEXT:    [[TMP7:%.*]] = load i8*, i8** [[TMP6]], align 8
1557 // ARRAY-NEXT:    [[TMP8:%.*]] = bitcast i8* [[TMP7]] to %struct.anon*
1558 // ARRAY-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T_WITH_PRIVATES]], %struct.kmp_task_t_with_privates* [[TMP3]], i32 0, i32 1
1559 // ARRAY-NEXT:    [[TMP10:%.*]] = bitcast %struct..kmp_privates.t* [[TMP9]] to i8*
1560 // ARRAY-NEXT:    [[TMP11:%.*]] = bitcast %struct.kmp_task_t_with_privates* [[TMP3]] to i8*
1561 // ARRAY-NEXT:    [[TMP12:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 5
1562 // ARRAY-NEXT:    [[TMP13:%.*]] = load i64, i64* [[TMP12]], align 8
1563 // ARRAY-NEXT:    [[TMP14:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 6
1564 // ARRAY-NEXT:    [[TMP15:%.*]] = load i64, i64* [[TMP14]], align 8
1565 // ARRAY-NEXT:    [[TMP16:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 7
1566 // ARRAY-NEXT:    [[TMP17:%.*]] = load i64, i64* [[TMP16]], align 8
1567 // ARRAY-NEXT:    [[TMP18:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 8
1568 // ARRAY-NEXT:    [[TMP19:%.*]] = load i32, i32* [[TMP18]], align 8
1569 // ARRAY-NEXT:    [[TMP20:%.*]] = getelementptr inbounds [[STRUCT_KMP_TASK_T]], %struct.kmp_task_t* [[TMP4]], i32 0, i32 9
1570 // ARRAY-NEXT:    [[TMP21:%.*]] = load i8*, i8** [[TMP20]], align 8
1571 // ARRAY-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META3:![0-9]+]])
1572 // ARRAY-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META6:![0-9]+]])
1573 // ARRAY-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META8:![0-9]+]])
1574 // ARRAY-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META10:![0-9]+]])
1575 // ARRAY-NEXT:    call void @llvm.experimental.noalias.scope.decl(metadata [[META12:![0-9]+]])
1576 // ARRAY-NEXT:    store i32 [[TMP2]], i32* [[DOTGLOBAL_TID__ADDR_I]], align 4, !noalias !14
1577 // ARRAY-NEXT:    store i32* [[TMP5]], i32** [[DOTPART_ID__ADDR_I]], align 8, !noalias !14
1578 // ARRAY-NEXT:    store i8* [[TMP10]], i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
1579 // ARRAY-NEXT:    store void (i8*, ...)* bitcast (void (%struct..kmp_privates.t*, float***, %struct.St***)* @.omp_task_privates_map. to void (i8*, ...)*), void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
1580 // ARRAY-NEXT:    store i8* [[TMP11]], i8** [[DOTTASK_T__ADDR_I]], align 8, !noalias !14
1581 // ARRAY-NEXT:    store i64 [[TMP13]], i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
1582 // ARRAY-NEXT:    store i64 [[TMP15]], i64* [[DOTUB__ADDR_I]], align 8, !noalias !14
1583 // ARRAY-NEXT:    store i64 [[TMP17]], i64* [[DOTST__ADDR_I]], align 8, !noalias !14
1584 // ARRAY-NEXT:    store i32 [[TMP19]], i32* [[DOTLITER__ADDR_I]], align 4, !noalias !14
1585 // ARRAY-NEXT:    store i8* [[TMP21]], i8** [[DOTREDUCTIONS__ADDR_I]], align 8, !noalias !14
1586 // ARRAY-NEXT:    store %struct.anon* [[TMP8]], %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
1587 // ARRAY-NEXT:    [[TMP22:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR_I]], align 8, !noalias !14
1588 // ARRAY-NEXT:    [[TMP23:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP22]], i32 0, i32 0
1589 // ARRAY-NEXT:    [[TMP24:%.*]] = load i64, i64* [[TMP23]], align 8
1590 // ARRAY-NEXT:    [[TMP25:%.*]] = load void (i8*, ...)*, void (i8*, ...)** [[DOTCOPY_FN__ADDR_I]], align 8, !noalias !14
1591 // ARRAY-NEXT:    [[TMP26:%.*]] = load i8*, i8** [[DOTPRIVATES__ADDR_I]], align 8, !noalias !14
1592 // ARRAY-NEXT:    [[TMP27:%.*]] = bitcast void (i8*, ...)* [[TMP25]] to void (i8*, float***, %struct.St***)*
1593 // ARRAY-NEXT:    call void [[TMP27]](i8* [[TMP26]], float*** [[DOTFIRSTPRIV_PTR_ADDR_I]], %struct.St*** [[DOTFIRSTPRIV_PTR_ADDR1_I]]) #[[ATTR2:[0-9]+]]
1594 // ARRAY-NEXT:    [[TMP28:%.*]] = load float**, float*** [[DOTFIRSTPRIV_PTR_ADDR_I]], align 8, !noalias !14
1595 // ARRAY-NEXT:    [[TMP29:%.*]] = load %struct.St**, %struct.St*** [[DOTFIRSTPRIV_PTR_ADDR1_I]], align 8, !noalias !14
1596 // ARRAY-NEXT:    [[TMP30:%.*]] = load i64, i64* [[DOTLB__ADDR_I]], align 8, !noalias !14
1597 // ARRAY-NEXT:    [[CONV_I:%.*]] = trunc i64 [[TMP30]] to i32
1598 // ARRAY-NEXT:    store i32 [[CONV_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14
1599 // ARRAY-NEXT:    br label [[OMP_INNER_FOR_COND_I:%.*]]
1600 // ARRAY:       omp.inner.for.cond.i:
1601 // ARRAY-NEXT:    [[TMP31:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15:![0-9]+]]
1602 // ARRAY-NEXT:    [[CONV2_I:%.*]] = sext i32 [[TMP31]] to i64
1603 // ARRAY-NEXT:    [[TMP32:%.*]] = load i64, i64* [[DOTUB__ADDR_I]], align 8, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1604 // ARRAY-NEXT:    [[CMP_I:%.*]] = icmp ule i64 [[CONV2_I]], [[TMP32]]
1605 // ARRAY-NEXT:    br i1 [[CMP_I]], label [[OMP_INNER_FOR_BODY_I:%.*]], label [[DOTOMP_OUTLINED__1_EXIT:%.*]]
1606 // ARRAY:       omp.inner.for.body.i:
1607 // ARRAY-NEXT:    [[TMP33:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1608 // ARRAY-NEXT:    store i32 [[TMP33]], i32* [[I_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1609 // ARRAY-NEXT:    [[TMP34:%.*]] = load i32, i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1610 // ARRAY-NEXT:    [[ADD3_I:%.*]] = add nsw i32 [[TMP34]], 1
1611 // ARRAY-NEXT:    store i32 [[ADD3_I]], i32* [[DOTOMP_IV_I]], align 4, !noalias !14, !llvm.access.group [[ACC_GRP15]]
1612 // ARRAY-NEXT:    br label [[OMP_INNER_FOR_COND_I]], !llvm.loop [[LOOP16:![0-9]+]]
1613 // ARRAY:       .omp_outlined..1.exit:
1614 // ARRAY-NEXT:    ret i32 0
1615 //
1616 //
1617 // SIMD-ONLY0-LABEL: define {{[^@]+}}@main
1618 // SIMD-ONLY0-SAME: () #[[ATTR0:[0-9]+]] {
1619 // SIMD-ONLY0-NEXT:  entry:
1620 // SIMD-ONLY0-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1621 // SIMD-ONLY0-NEXT:    [[TTT:%.*]] = alloca [[STRUCT_S:%.*]], align 8
1622 // SIMD-ONLY0-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S]], align 8
1623 // SIMD-ONLY0-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
1624 // SIMD-ONLY0-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
1625 // SIMD-ONLY0-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S], align 16
1626 // SIMD-ONLY0-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S]], align 8
1627 // SIMD-ONLY0-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1628 // SIMD-ONLY0-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
1629 // SIMD-ONLY0-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
1630 // SIMD-ONLY0-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1631 // SIMD-ONLY0-NEXT:    [[I:%.*]] = alloca i32, align 4
1632 // SIMD-ONLY0-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1633 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdEC1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TTT]])
1634 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdEC1ERKS0_d(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TEST]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[TTT]], double noundef 0.000000e+00)
1635 // SIMD-ONLY0-NEXT:    store i32 0, i32* [[T_VAR]], align 4
1636 // SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
1637 // SIMD-ONLY0-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i64 8, i1 false)
1638 // SIMD-ONLY0-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
1639 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdEC1Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[ARRAYINIT_BEGIN]], double noundef 1.000000e+00)
1640 // SIMD-ONLY0-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
1641 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdEC1Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[ARRAYINIT_ELEMENT]], double noundef 2.000000e+00)
1642 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdEC1Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[VAR]], double noundef 3.000000e+00)
1643 // SIMD-ONLY0-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
1644 // SIMD-ONLY0-NEXT:    store i64 9, i64* [[DOTOMP_UB]], align 8
1645 // SIMD-ONLY0-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
1646 // SIMD-ONLY0-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
1647 // SIMD-ONLY0-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_IV]], align 4
1648 // SIMD-ONLY0-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1649 // SIMD-ONLY0:       omp.inner.for.cond:
1650 // SIMD-ONLY0-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]]
1651 // SIMD-ONLY0-NEXT:    [[CONV1:%.*]] = sext i32 [[TMP2]] to i64
1652 // SIMD-ONLY0-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP2]]
1653 // SIMD-ONLY0-NEXT:    [[CMP:%.*]] = icmp ule i64 [[CONV1]], [[TMP3]]
1654 // SIMD-ONLY0-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1655 // SIMD-ONLY0:       omp.inner.for.body:
1656 // SIMD-ONLY0-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
1657 // SIMD-ONLY0-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
1658 // SIMD-ONLY0-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1659 // SIMD-ONLY0-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
1660 // SIMD-ONLY0-NEXT:    [[TMP5:%.*]] = load i32, i32* [[T_VAR]], align 4, !llvm.access.group [[ACC_GRP2]]
1661 // SIMD-ONLY0-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 0
1662 // SIMD-ONLY0-NEXT:    store i32 [[TMP5]], i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP2]]
1663 // SIMD-ONLY0-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
1664 // SIMD-ONLY0-NEXT:    [[TMP6:%.*]] = bitcast %struct.S* [[ARRAYIDX2]] to i8*
1665 // SIMD-ONLY0-NEXT:    [[TMP7:%.*]] = bitcast %struct.S* [[VAR]] to i8*
1666 // SIMD-ONLY0-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP6]], i8* align 8 [[TMP7]], i64 8, i1 false), !llvm.access.group [[ACC_GRP2]]
1667 // SIMD-ONLY0-NEXT:    store i32 33, i32* @_ZZ4mainE5sivar, align 4, !llvm.access.group [[ACC_GRP2]]
1668 // SIMD-ONLY0-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1669 // SIMD-ONLY0:       omp.body.continue:
1670 // SIMD-ONLY0-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1671 // SIMD-ONLY0:       omp.inner.for.inc:
1672 // SIMD-ONLY0-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
1673 // SIMD-ONLY0-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
1674 // SIMD-ONLY0-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
1675 // SIMD-ONLY0-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
1676 // SIMD-ONLY0:       omp.inner.for.end:
1677 // SIMD-ONLY0-NEXT:    store i32 10, i32* [[I]], align 4
1678 // SIMD-ONLY0-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
1679 // SIMD-ONLY0-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
1680 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[VAR]]) #[[ATTR4:[0-9]+]]
1681 // SIMD-ONLY0-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
1682 // SIMD-ONLY0-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
1683 // SIMD-ONLY0-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1684 // SIMD-ONLY0:       arraydestroy.body:
1685 // SIMD-ONLY0-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP9]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1686 // SIMD-ONLY0-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1687 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
1688 // SIMD-ONLY0-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
1689 // SIMD-ONLY0-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY]]
1690 // SIMD-ONLY0:       arraydestroy.done4:
1691 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TEST]]) #[[ATTR4]]
1692 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TTT]]) #[[ATTR4]]
1693 // SIMD-ONLY0-NEXT:    [[TMP10:%.*]] = load i32, i32* [[RETVAL]], align 4
1694 // SIMD-ONLY0-NEXT:    ret i32 [[TMP10]]
1695 //
1696 //
1697 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIdEC1Ev
1698 // SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
1699 // SIMD-ONLY0-NEXT:  entry:
1700 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1701 // SIMD-ONLY0-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1702 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1703 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdEC2Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]])
1704 // SIMD-ONLY0-NEXT:    ret void
1705 //
1706 //
1707 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIdEC1ERKS0_d
1708 // SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[S:%.*]], double noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1709 // SIMD-ONLY0-NEXT:  entry:
1710 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1711 // SIMD-ONLY0-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S*, align 8
1712 // SIMD-ONLY0-NEXT:    [[T_ADDR:%.*]] = alloca double, align 8
1713 // SIMD-ONLY0-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1714 // SIMD-ONLY0-NEXT:    store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8
1715 // SIMD-ONLY0-NEXT:    store double [[T]], double* [[T_ADDR]], align 8
1716 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1717 // SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8
1718 // SIMD-ONLY0-NEXT:    [[TMP1:%.*]] = load double, double* [[T_ADDR]], align 8
1719 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdEC2ERKS0_d(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[TMP0]], double noundef [[TMP1]])
1720 // SIMD-ONLY0-NEXT:    ret void
1721 //
1722 //
1723 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIdEC1Ed
1724 // SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], double noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1725 // SIMD-ONLY0-NEXT:  entry:
1726 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1727 // SIMD-ONLY0-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
1728 // SIMD-ONLY0-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1729 // SIMD-ONLY0-NEXT:    store double [[A]], double* [[A_ADDR]], align 8
1730 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1731 // SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = load double, double* [[A_ADDR]], align 8
1732 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdEC2Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]], double noundef [[TMP0]])
1733 // SIMD-ONLY0-NEXT:    ret void
1734 //
1735 //
1736 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
1737 // SIMD-ONLY0-SAME: () #[[ATTR3:[0-9]+]] {
1738 // SIMD-ONLY0-NEXT:  entry:
1739 // SIMD-ONLY0-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1740 // SIMD-ONLY0-NEXT:    [[TTT:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
1741 // SIMD-ONLY0-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0]], align 4
1742 // SIMD-ONLY0-NEXT:    [[T_VAR:%.*]] = alloca i32, align 128
1743 // SIMD-ONLY0-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
1744 // SIMD-ONLY0-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
1745 // SIMD-ONLY0-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S_0]], align 4
1746 // SIMD-ONLY0-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1747 // SIMD-ONLY0-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
1748 // SIMD-ONLY0-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
1749 // SIMD-ONLY0-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1750 // SIMD-ONLY0-NEXT:    [[I:%.*]] = alloca i32, align 4
1751 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TTT]])
1752 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiEC1ERKS0_i(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TTT]], i32 noundef 0)
1753 // SIMD-ONLY0-NEXT:    store i32 0, i32* [[T_VAR]], align 128
1754 // SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
1755 // SIMD-ONLY0-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
1756 // SIMD-ONLY0-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
1757 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 noundef 1)
1758 // SIMD-ONLY0-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
1759 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
1760 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]], i32 noundef 3)
1761 // SIMD-ONLY0-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
1762 // SIMD-ONLY0-NEXT:    store i64 9, i64* [[DOTOMP_UB]], align 8
1763 // SIMD-ONLY0-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
1764 // SIMD-ONLY0-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
1765 // SIMD-ONLY0-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_IV]], align 4
1766 // SIMD-ONLY0-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
1767 // SIMD-ONLY0:       omp.inner.for.cond:
1768 // SIMD-ONLY0-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]]
1769 // SIMD-ONLY0-NEXT:    [[CONV1:%.*]] = sext i32 [[TMP2]] to i64
1770 // SIMD-ONLY0-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP6]]
1771 // SIMD-ONLY0-NEXT:    [[CMP:%.*]] = icmp ule i64 [[CONV1]], [[TMP3]]
1772 // SIMD-ONLY0-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
1773 // SIMD-ONLY0:       omp.inner.for.body:
1774 // SIMD-ONLY0-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]]
1775 // SIMD-ONLY0-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
1776 // SIMD-ONLY0-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
1777 // SIMD-ONLY0-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP6]]
1778 // SIMD-ONLY0-NEXT:    [[TMP5:%.*]] = load i32, i32* [[T_VAR]], align 128, !llvm.access.group [[ACC_GRP6]]
1779 // SIMD-ONLY0-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 0
1780 // SIMD-ONLY0-NEXT:    store i32 [[TMP5]], i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP6]]
1781 // SIMD-ONLY0-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
1782 // SIMD-ONLY0-NEXT:    [[TMP6:%.*]] = bitcast %struct.S.0* [[ARRAYIDX2]] to i8*
1783 // SIMD-ONLY0-NEXT:    [[TMP7:%.*]] = bitcast %struct.S.0* [[VAR]] to i8*
1784 // SIMD-ONLY0-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP6]], i8* align 4 [[TMP7]], i64 4, i1 false), !llvm.access.group [[ACC_GRP6]]
1785 // SIMD-ONLY0-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
1786 // SIMD-ONLY0:       omp.body.continue:
1787 // SIMD-ONLY0-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
1788 // SIMD-ONLY0:       omp.inner.for.inc:
1789 // SIMD-ONLY0-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]]
1790 // SIMD-ONLY0-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
1791 // SIMD-ONLY0-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]]
1792 // SIMD-ONLY0-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
1793 // SIMD-ONLY0:       omp.inner.for.end:
1794 // SIMD-ONLY0-NEXT:    store i32 10, i32* [[I]], align 4
1795 // SIMD-ONLY0-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1796 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
1797 // SIMD-ONLY0-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
1798 // SIMD-ONLY0-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
1799 // SIMD-ONLY0-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
1800 // SIMD-ONLY0:       arraydestroy.body:
1801 // SIMD-ONLY0-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP9]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
1802 // SIMD-ONLY0-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
1803 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
1804 // SIMD-ONLY0-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
1805 // SIMD-ONLY0-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY]]
1806 // SIMD-ONLY0:       arraydestroy.done4:
1807 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
1808 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TTT]]) #[[ATTR4]]
1809 // SIMD-ONLY0-NEXT:    [[TMP10:%.*]] = load i32, i32* [[RETVAL]], align 4
1810 // SIMD-ONLY0-NEXT:    ret i32 [[TMP10]]
1811 //
1812 //
1813 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIdED1Ev
1814 // SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1815 // SIMD-ONLY0-NEXT:  entry:
1816 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1817 // SIMD-ONLY0-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1818 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1819 // SIMD-ONLY0-NEXT:    call void @_ZN1SIdED2Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]]) #[[ATTR4]]
1820 // SIMD-ONLY0-NEXT:    ret void
1821 //
1822 //
1823 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIdEC2Ev
1824 // SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1825 // SIMD-ONLY0-NEXT:  entry:
1826 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1827 // SIMD-ONLY0-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1828 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1829 // SIMD-ONLY0-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1830 // SIMD-ONLY0-NEXT:    store double 0.000000e+00, double* [[F]], align 8
1831 // SIMD-ONLY0-NEXT:    ret void
1832 //
1833 //
1834 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIdED2Ev
1835 // SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1836 // SIMD-ONLY0-NEXT:  entry:
1837 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1838 // SIMD-ONLY0-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1839 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1840 // SIMD-ONLY0-NEXT:    ret void
1841 //
1842 //
1843 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIdEC2ERKS0_d
1844 // SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[S:%.*]], double noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1845 // SIMD-ONLY0-NEXT:  entry:
1846 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1847 // SIMD-ONLY0-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S*, align 8
1848 // SIMD-ONLY0-NEXT:    [[T_ADDR:%.*]] = alloca double, align 8
1849 // SIMD-ONLY0-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1850 // SIMD-ONLY0-NEXT:    store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8
1851 // SIMD-ONLY0-NEXT:    store double [[T]], double* [[T_ADDR]], align 8
1852 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1853 // SIMD-ONLY0-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1854 // SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8
1855 // SIMD-ONLY0-NEXT:    [[F2:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 0
1856 // SIMD-ONLY0-NEXT:    [[TMP1:%.*]] = load double, double* [[F2]], align 8
1857 // SIMD-ONLY0-NEXT:    [[TMP2:%.*]] = load double, double* [[T_ADDR]], align 8
1858 // SIMD-ONLY0-NEXT:    [[ADD:%.*]] = fadd double [[TMP1]], [[TMP2]]
1859 // SIMD-ONLY0-NEXT:    store double [[ADD]], double* [[F]], align 8
1860 // SIMD-ONLY0-NEXT:    ret void
1861 //
1862 //
1863 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIdEC2Ed
1864 // SIMD-ONLY0-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], double noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1865 // SIMD-ONLY0-NEXT:  entry:
1866 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
1867 // SIMD-ONLY0-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
1868 // SIMD-ONLY0-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
1869 // SIMD-ONLY0-NEXT:    store double [[A]], double* [[A_ADDR]], align 8
1870 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
1871 // SIMD-ONLY0-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
1872 // SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = load double, double* [[A_ADDR]], align 8
1873 // SIMD-ONLY0-NEXT:    store double [[TMP0]], double* [[F]], align 8
1874 // SIMD-ONLY0-NEXT:    ret void
1875 //
1876 //
1877 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
1878 // SIMD-ONLY0-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1879 // SIMD-ONLY0-NEXT:  entry:
1880 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1881 // SIMD-ONLY0-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1882 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1883 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
1884 // SIMD-ONLY0-NEXT:    ret void
1885 //
1886 //
1887 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIiEC1ERKS0_i
1888 // SIMD-ONLY0-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], i32 noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1889 // SIMD-ONLY0-NEXT:  entry:
1890 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1891 // SIMD-ONLY0-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8
1892 // SIMD-ONLY0-NEXT:    [[T_ADDR:%.*]] = alloca i32, align 4
1893 // SIMD-ONLY0-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1894 // SIMD-ONLY0-NEXT:    store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8
1895 // SIMD-ONLY0-NEXT:    store i32 [[T]], i32* [[T_ADDR]], align 4
1896 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1897 // SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8
1898 // SIMD-ONLY0-NEXT:    [[TMP1:%.*]] = load i32, i32* [[T_ADDR]], align 4
1899 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiEC2ERKS0_i(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP0]], i32 noundef [[TMP1]])
1900 // SIMD-ONLY0-NEXT:    ret void
1901 //
1902 //
1903 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
1904 // SIMD-ONLY0-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1905 // SIMD-ONLY0-NEXT:  entry:
1906 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1907 // SIMD-ONLY0-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1908 // SIMD-ONLY0-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1909 // SIMD-ONLY0-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1910 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1911 // SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
1912 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]])
1913 // SIMD-ONLY0-NEXT:    ret void
1914 //
1915 //
1916 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
1917 // SIMD-ONLY0-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1918 // SIMD-ONLY0-NEXT:  entry:
1919 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1920 // SIMD-ONLY0-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1921 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1922 // SIMD-ONLY0-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
1923 // SIMD-ONLY0-NEXT:    ret void
1924 //
1925 //
1926 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
1927 // SIMD-ONLY0-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1928 // SIMD-ONLY0-NEXT:  entry:
1929 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1930 // SIMD-ONLY0-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1931 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1932 // SIMD-ONLY0-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1933 // SIMD-ONLY0-NEXT:    store i32 0, i32* [[F]], align 4
1934 // SIMD-ONLY0-NEXT:    ret void
1935 //
1936 //
1937 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIiEC2ERKS0_i
1938 // SIMD-ONLY0-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], i32 noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1939 // SIMD-ONLY0-NEXT:  entry:
1940 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1941 // SIMD-ONLY0-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8
1942 // SIMD-ONLY0-NEXT:    [[T_ADDR:%.*]] = alloca i32, align 4
1943 // SIMD-ONLY0-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1944 // SIMD-ONLY0-NEXT:    store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8
1945 // SIMD-ONLY0-NEXT:    store i32 [[T]], i32* [[T_ADDR]], align 4
1946 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1947 // SIMD-ONLY0-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1948 // SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8
1949 // SIMD-ONLY0-NEXT:    [[F2:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[TMP0]], i32 0, i32 0
1950 // SIMD-ONLY0-NEXT:    [[TMP1:%.*]] = load i32, i32* [[F2]], align 4
1951 // SIMD-ONLY0-NEXT:    [[TMP2:%.*]] = load i32, i32* [[T_ADDR]], align 4
1952 // SIMD-ONLY0-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP2]]
1953 // SIMD-ONLY0-NEXT:    store i32 [[ADD]], i32* [[F]], align 4
1954 // SIMD-ONLY0-NEXT:    ret void
1955 //
1956 //
1957 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
1958 // SIMD-ONLY0-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1959 // SIMD-ONLY0-NEXT:  entry:
1960 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1961 // SIMD-ONLY0-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
1962 // SIMD-ONLY0-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1963 // SIMD-ONLY0-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
1964 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1965 // SIMD-ONLY0-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
1966 // SIMD-ONLY0-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
1967 // SIMD-ONLY0-NEXT:    store i32 [[TMP0]], i32* [[F]], align 4
1968 // SIMD-ONLY0-NEXT:    ret void
1969 //
1970 //
1971 // SIMD-ONLY0-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
1972 // SIMD-ONLY0-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
1973 // SIMD-ONLY0-NEXT:  entry:
1974 // SIMD-ONLY0-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
1975 // SIMD-ONLY0-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
1976 // SIMD-ONLY0-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
1977 // SIMD-ONLY0-NEXT:    ret void
1978 //
1979 //
1980 // SIMD-ONLY1-LABEL: define {{[^@]+}}@main
1981 // SIMD-ONLY1-SAME: () #[[ATTR0:[0-9]+]] {
1982 // SIMD-ONLY1-NEXT:  entry:
1983 // SIMD-ONLY1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
1984 // SIMD-ONLY1-NEXT:    [[TTT:%.*]] = alloca [[STRUCT_S:%.*]], align 8
1985 // SIMD-ONLY1-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S]], align 8
1986 // SIMD-ONLY1-NEXT:    [[T_VAR:%.*]] = alloca i32, align 4
1987 // SIMD-ONLY1-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
1988 // SIMD-ONLY1-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S], align 16
1989 // SIMD-ONLY1-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S]], align 8
1990 // SIMD-ONLY1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
1991 // SIMD-ONLY1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
1992 // SIMD-ONLY1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
1993 // SIMD-ONLY1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
1994 // SIMD-ONLY1-NEXT:    [[I:%.*]] = alloca i32, align 4
1995 // SIMD-ONLY1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
1996 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdEC1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TTT]])
1997 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdEC1ERKS0_d(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TEST]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[TTT]], double noundef 0.000000e+00)
1998 // SIMD-ONLY1-NEXT:    store i32 0, i32* [[T_VAR]], align 4
1999 // SIMD-ONLY1-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
2000 // SIMD-ONLY1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const.main.vec to i8*), i64 8, i1 false)
2001 // SIMD-ONLY1-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
2002 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdEC1Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[ARRAYINIT_BEGIN]], double noundef 1.000000e+00)
2003 // SIMD-ONLY1-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYINIT_BEGIN]], i64 1
2004 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdEC1Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[ARRAYINIT_ELEMENT]], double noundef 2.000000e+00)
2005 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdEC1Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[VAR]], double noundef 3.000000e+00)
2006 // SIMD-ONLY1-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
2007 // SIMD-ONLY1-NEXT:    store i64 9, i64* [[DOTOMP_UB]], align 8
2008 // SIMD-ONLY1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
2009 // SIMD-ONLY1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
2010 // SIMD-ONLY1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_IV]], align 4
2011 // SIMD-ONLY1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2012 // SIMD-ONLY1:       omp.inner.for.cond:
2013 // SIMD-ONLY1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]]
2014 // SIMD-ONLY1-NEXT:    [[CONV1:%.*]] = sext i32 [[TMP2]] to i64
2015 // SIMD-ONLY1-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP2]]
2016 // SIMD-ONLY1-NEXT:    [[CMP:%.*]] = icmp ule i64 [[CONV1]], [[TMP3]]
2017 // SIMD-ONLY1-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2018 // SIMD-ONLY1:       omp.inner.for.body:
2019 // SIMD-ONLY1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
2020 // SIMD-ONLY1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
2021 // SIMD-ONLY1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2022 // SIMD-ONLY1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
2023 // SIMD-ONLY1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[T_VAR]], align 4, !llvm.access.group [[ACC_GRP2]]
2024 // SIMD-ONLY1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 0
2025 // SIMD-ONLY1-NEXT:    store i32 [[TMP5]], i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP2]]
2026 // SIMD-ONLY1-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i64 0, i64 0
2027 // SIMD-ONLY1-NEXT:    [[TMP6:%.*]] = bitcast %struct.S* [[ARRAYIDX2]] to i8*
2028 // SIMD-ONLY1-NEXT:    [[TMP7:%.*]] = bitcast %struct.S* [[VAR]] to i8*
2029 // SIMD-ONLY1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP6]], i8* align 8 [[TMP7]], i64 8, i1 false), !llvm.access.group [[ACC_GRP2]]
2030 // SIMD-ONLY1-NEXT:    store i32 33, i32* @_ZZ4mainE5sivar, align 4, !llvm.access.group [[ACC_GRP2]]
2031 // SIMD-ONLY1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2032 // SIMD-ONLY1:       omp.body.continue:
2033 // SIMD-ONLY1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2034 // SIMD-ONLY1:       omp.inner.for.inc:
2035 // SIMD-ONLY1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
2036 // SIMD-ONLY1-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
2037 // SIMD-ONLY1-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
2038 // SIMD-ONLY1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
2039 // SIMD-ONLY1:       omp.inner.for.end:
2040 // SIMD-ONLY1-NEXT:    store i32 10, i32* [[I]], align 4
2041 // SIMD-ONLY1-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
2042 // SIMD-ONLY1-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
2043 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[VAR]]) #[[ATTR4:[0-9]+]]
2044 // SIMD-ONLY1-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S], [2 x %struct.S]* [[S_ARR]], i32 0, i32 0
2045 // SIMD-ONLY1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAY_BEGIN]], i64 2
2046 // SIMD-ONLY1-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
2047 // SIMD-ONLY1:       arraydestroy.body:
2048 // SIMD-ONLY1-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S* [ [[TMP9]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
2049 // SIMD-ONLY1-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
2050 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
2051 // SIMD-ONLY1-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
2052 // SIMD-ONLY1-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY]]
2053 // SIMD-ONLY1:       arraydestroy.done4:
2054 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TEST]]) #[[ATTR4]]
2055 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdED1Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[TTT]]) #[[ATTR4]]
2056 // SIMD-ONLY1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[RETVAL]], align 4
2057 // SIMD-ONLY1-NEXT:    ret i32 [[TMP10]]
2058 //
2059 //
2060 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIdEC1Ev
2061 // SIMD-ONLY1-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
2062 // SIMD-ONLY1-NEXT:  entry:
2063 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2064 // SIMD-ONLY1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2065 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2066 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdEC2Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]])
2067 // SIMD-ONLY1-NEXT:    ret void
2068 //
2069 //
2070 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIdEC1ERKS0_d
2071 // SIMD-ONLY1-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[S:%.*]], double noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2072 // SIMD-ONLY1-NEXT:  entry:
2073 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2074 // SIMD-ONLY1-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S*, align 8
2075 // SIMD-ONLY1-NEXT:    [[T_ADDR:%.*]] = alloca double, align 8
2076 // SIMD-ONLY1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2077 // SIMD-ONLY1-NEXT:    store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8
2078 // SIMD-ONLY1-NEXT:    store double [[T]], double* [[T_ADDR]], align 8
2079 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2080 // SIMD-ONLY1-NEXT:    [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8
2081 // SIMD-ONLY1-NEXT:    [[TMP1:%.*]] = load double, double* [[T_ADDR]], align 8
2082 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdEC2ERKS0_d(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[TMP0]], double noundef [[TMP1]])
2083 // SIMD-ONLY1-NEXT:    ret void
2084 //
2085 //
2086 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIdEC1Ed
2087 // SIMD-ONLY1-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], double noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2088 // SIMD-ONLY1-NEXT:  entry:
2089 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2090 // SIMD-ONLY1-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
2091 // SIMD-ONLY1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2092 // SIMD-ONLY1-NEXT:    store double [[A]], double* [[A_ADDR]], align 8
2093 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2094 // SIMD-ONLY1-NEXT:    [[TMP0:%.*]] = load double, double* [[A_ADDR]], align 8
2095 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdEC2Ed(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]], double noundef [[TMP0]])
2096 // SIMD-ONLY1-NEXT:    ret void
2097 //
2098 //
2099 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
2100 // SIMD-ONLY1-SAME: () #[[ATTR3:[0-9]+]] {
2101 // SIMD-ONLY1-NEXT:  entry:
2102 // SIMD-ONLY1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
2103 // SIMD-ONLY1-NEXT:    [[TTT:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
2104 // SIMD-ONLY1-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0]], align 4
2105 // SIMD-ONLY1-NEXT:    [[T_VAR:%.*]] = alloca i32, align 128
2106 // SIMD-ONLY1-NEXT:    [[VEC:%.*]] = alloca [2 x i32], align 4
2107 // SIMD-ONLY1-NEXT:    [[S_ARR:%.*]] = alloca [2 x %struct.S.0], align 4
2108 // SIMD-ONLY1-NEXT:    [[VAR:%.*]] = alloca [[STRUCT_S_0]], align 4
2109 // SIMD-ONLY1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2110 // SIMD-ONLY1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
2111 // SIMD-ONLY1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
2112 // SIMD-ONLY1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2113 // SIMD-ONLY1-NEXT:    [[I:%.*]] = alloca i32, align 4
2114 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TTT]])
2115 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiEC1ERKS0_i(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TTT]], i32 noundef 0)
2116 // SIMD-ONLY1-NEXT:    store i32 0, i32* [[T_VAR]], align 128
2117 // SIMD-ONLY1-NEXT:    [[TMP0:%.*]] = bitcast [2 x i32]* [[VEC]] to i8*
2118 // SIMD-ONLY1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP0]], i8* align 4 bitcast ([2 x i32]* @__const._Z5tmainIiET_v.vec to i8*), i64 8, i1 false)
2119 // SIMD-ONLY1-NEXT:    [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
2120 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN]], i32 noundef 1)
2121 // SIMD-ONLY1-NEXT:    [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYINIT_BEGIN]], i64 1
2122 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
2123 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiEC1Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]], i32 noundef 3)
2124 // SIMD-ONLY1-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
2125 // SIMD-ONLY1-NEXT:    store i64 9, i64* [[DOTOMP_UB]], align 8
2126 // SIMD-ONLY1-NEXT:    [[TMP1:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
2127 // SIMD-ONLY1-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP1]] to i32
2128 // SIMD-ONLY1-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_IV]], align 4
2129 // SIMD-ONLY1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2130 // SIMD-ONLY1:       omp.inner.for.cond:
2131 // SIMD-ONLY1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6:![0-9]+]]
2132 // SIMD-ONLY1-NEXT:    [[CONV1:%.*]] = sext i32 [[TMP2]] to i64
2133 // SIMD-ONLY1-NEXT:    [[TMP3:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP6]]
2134 // SIMD-ONLY1-NEXT:    [[CMP:%.*]] = icmp ule i64 [[CONV1]], [[TMP3]]
2135 // SIMD-ONLY1-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2136 // SIMD-ONLY1:       omp.inner.for.body:
2137 // SIMD-ONLY1-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]]
2138 // SIMD-ONLY1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP4]], 1
2139 // SIMD-ONLY1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2140 // SIMD-ONLY1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP6]]
2141 // SIMD-ONLY1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[T_VAR]], align 128, !llvm.access.group [[ACC_GRP6]]
2142 // SIMD-ONLY1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x i32], [2 x i32]* [[VEC]], i64 0, i64 0
2143 // SIMD-ONLY1-NEXT:    store i32 [[TMP5]], i32* [[ARRAYIDX]], align 4, !llvm.access.group [[ACC_GRP6]]
2144 // SIMD-ONLY1-NEXT:    [[ARRAYIDX2:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i64 0, i64 0
2145 // SIMD-ONLY1-NEXT:    [[TMP6:%.*]] = bitcast %struct.S.0* [[ARRAYIDX2]] to i8*
2146 // SIMD-ONLY1-NEXT:    [[TMP7:%.*]] = bitcast %struct.S.0* [[VAR]] to i8*
2147 // SIMD-ONLY1-NEXT:    call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 4 [[TMP6]], i8* align 4 [[TMP7]], i64 4, i1 false), !llvm.access.group [[ACC_GRP6]]
2148 // SIMD-ONLY1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2149 // SIMD-ONLY1:       omp.body.continue:
2150 // SIMD-ONLY1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2151 // SIMD-ONLY1:       omp.inner.for.inc:
2152 // SIMD-ONLY1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]]
2153 // SIMD-ONLY1-NEXT:    [[ADD3:%.*]] = add nsw i32 [[TMP8]], 1
2154 // SIMD-ONLY1-NEXT:    store i32 [[ADD3]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP6]]
2155 // SIMD-ONLY1-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP7:![0-9]+]]
2156 // SIMD-ONLY1:       omp.inner.for.end:
2157 // SIMD-ONLY1-NEXT:    store i32 10, i32* [[I]], align 4
2158 // SIMD-ONLY1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
2159 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[VAR]]) #[[ATTR4]]
2160 // SIMD-ONLY1-NEXT:    [[ARRAY_BEGIN:%.*]] = getelementptr inbounds [2 x %struct.S.0], [2 x %struct.S.0]* [[S_ARR]], i32 0, i32 0
2161 // SIMD-ONLY1-NEXT:    [[TMP9:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAY_BEGIN]], i64 2
2162 // SIMD-ONLY1-NEXT:    br label [[ARRAYDESTROY_BODY:%.*]]
2163 // SIMD-ONLY1:       arraydestroy.body:
2164 // SIMD-ONLY1-NEXT:    [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi %struct.S.0* [ [[TMP9]], [[OMP_INNER_FOR_END]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ]
2165 // SIMD-ONLY1-NEXT:    [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[ARRAYDESTROY_ELEMENTPAST]], i64 -1
2166 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]]
2167 // SIMD-ONLY1-NEXT:    [[ARRAYDESTROY_DONE:%.*]] = icmp eq %struct.S.0* [[ARRAYDESTROY_ELEMENT]], [[ARRAY_BEGIN]]
2168 // SIMD-ONLY1-NEXT:    br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY]]
2169 // SIMD-ONLY1:       arraydestroy.done4:
2170 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
2171 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TTT]]) #[[ATTR4]]
2172 // SIMD-ONLY1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[RETVAL]], align 4
2173 // SIMD-ONLY1-NEXT:    ret i32 [[TMP10]]
2174 //
2175 //
2176 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIdED1Ev
2177 // SIMD-ONLY1-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2178 // SIMD-ONLY1-NEXT:  entry:
2179 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2180 // SIMD-ONLY1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2181 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2182 // SIMD-ONLY1-NEXT:    call void @_ZN1SIdED2Ev(%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS1]]) #[[ATTR4]]
2183 // SIMD-ONLY1-NEXT:    ret void
2184 //
2185 //
2186 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIdEC2Ev
2187 // SIMD-ONLY1-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2188 // SIMD-ONLY1-NEXT:  entry:
2189 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2190 // SIMD-ONLY1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2191 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2192 // SIMD-ONLY1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
2193 // SIMD-ONLY1-NEXT:    store double 0.000000e+00, double* [[F]], align 8
2194 // SIMD-ONLY1-NEXT:    ret void
2195 //
2196 //
2197 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIdED2Ev
2198 // SIMD-ONLY1-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2199 // SIMD-ONLY1-NEXT:  entry:
2200 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2201 // SIMD-ONLY1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2202 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2203 // SIMD-ONLY1-NEXT:    ret void
2204 //
2205 //
2206 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIdEC2ERKS0_d
2207 // SIMD-ONLY1-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], %struct.S* noundef nonnull align 8 dereferenceable(8) [[S:%.*]], double noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2208 // SIMD-ONLY1-NEXT:  entry:
2209 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2210 // SIMD-ONLY1-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S*, align 8
2211 // SIMD-ONLY1-NEXT:    [[T_ADDR:%.*]] = alloca double, align 8
2212 // SIMD-ONLY1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2213 // SIMD-ONLY1-NEXT:    store %struct.S* [[S]], %struct.S** [[S_ADDR]], align 8
2214 // SIMD-ONLY1-NEXT:    store double [[T]], double* [[T_ADDR]], align 8
2215 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2216 // SIMD-ONLY1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
2217 // SIMD-ONLY1-NEXT:    [[TMP0:%.*]] = load %struct.S*, %struct.S** [[S_ADDR]], align 8
2218 // SIMD-ONLY1-NEXT:    [[F2:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TMP0]], i32 0, i32 0
2219 // SIMD-ONLY1-NEXT:    [[TMP1:%.*]] = load double, double* [[F2]], align 8
2220 // SIMD-ONLY1-NEXT:    [[TMP2:%.*]] = load double, double* [[T_ADDR]], align 8
2221 // SIMD-ONLY1-NEXT:    [[ADD:%.*]] = fadd double [[TMP1]], [[TMP2]]
2222 // SIMD-ONLY1-NEXT:    store double [[ADD]], double* [[F]], align 8
2223 // SIMD-ONLY1-NEXT:    ret void
2224 //
2225 //
2226 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIdEC2Ed
2227 // SIMD-ONLY1-SAME: (%struct.S* noundef nonnull align 8 dereferenceable(8) [[THIS:%.*]], double noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2228 // SIMD-ONLY1-NEXT:  entry:
2229 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
2230 // SIMD-ONLY1-NEXT:    [[A_ADDR:%.*]] = alloca double, align 8
2231 // SIMD-ONLY1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
2232 // SIMD-ONLY1-NEXT:    store double [[A]], double* [[A_ADDR]], align 8
2233 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
2234 // SIMD-ONLY1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
2235 // SIMD-ONLY1-NEXT:    [[TMP0:%.*]] = load double, double* [[A_ADDR]], align 8
2236 // SIMD-ONLY1-NEXT:    store double [[TMP0]], double* [[F]], align 8
2237 // SIMD-ONLY1-NEXT:    ret void
2238 //
2239 //
2240 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
2241 // SIMD-ONLY1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2242 // SIMD-ONLY1-NEXT:  entry:
2243 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2244 // SIMD-ONLY1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2245 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2246 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
2247 // SIMD-ONLY1-NEXT:    ret void
2248 //
2249 //
2250 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIiEC1ERKS0_i
2251 // SIMD-ONLY1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], i32 noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2252 // SIMD-ONLY1-NEXT:  entry:
2253 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2254 // SIMD-ONLY1-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8
2255 // SIMD-ONLY1-NEXT:    [[T_ADDR:%.*]] = alloca i32, align 4
2256 // SIMD-ONLY1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2257 // SIMD-ONLY1-NEXT:    store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8
2258 // SIMD-ONLY1-NEXT:    store i32 [[T]], i32* [[T_ADDR]], align 4
2259 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2260 // SIMD-ONLY1-NEXT:    [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8
2261 // SIMD-ONLY1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[T_ADDR]], align 4
2262 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiEC2ERKS0_i(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TMP0]], i32 noundef [[TMP1]])
2263 // SIMD-ONLY1-NEXT:    ret void
2264 //
2265 //
2266 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ei
2267 // SIMD-ONLY1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2268 // SIMD-ONLY1-NEXT:  entry:
2269 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2270 // SIMD-ONLY1-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
2271 // SIMD-ONLY1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2272 // SIMD-ONLY1-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
2273 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2274 // SIMD-ONLY1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
2275 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiEC2Ei(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]])
2276 // SIMD-ONLY1-NEXT:    ret void
2277 //
2278 //
2279 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
2280 // SIMD-ONLY1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2281 // SIMD-ONLY1-NEXT:  entry:
2282 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2283 // SIMD-ONLY1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2284 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2285 // SIMD-ONLY1-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
2286 // SIMD-ONLY1-NEXT:    ret void
2287 //
2288 //
2289 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
2290 // SIMD-ONLY1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2291 // SIMD-ONLY1-NEXT:  entry:
2292 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2293 // SIMD-ONLY1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2294 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2295 // SIMD-ONLY1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
2296 // SIMD-ONLY1-NEXT:    store i32 0, i32* [[F]], align 4
2297 // SIMD-ONLY1-NEXT:    ret void
2298 //
2299 //
2300 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIiEC2ERKS0_i
2301 // SIMD-ONLY1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], %struct.S.0* noundef nonnull align 4 dereferenceable(4) [[S:%.*]], i32 noundef [[T:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2302 // SIMD-ONLY1-NEXT:  entry:
2303 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2304 // SIMD-ONLY1-NEXT:    [[S_ADDR:%.*]] = alloca %struct.S.0*, align 8
2305 // SIMD-ONLY1-NEXT:    [[T_ADDR:%.*]] = alloca i32, align 4
2306 // SIMD-ONLY1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2307 // SIMD-ONLY1-NEXT:    store %struct.S.0* [[S]], %struct.S.0** [[S_ADDR]], align 8
2308 // SIMD-ONLY1-NEXT:    store i32 [[T]], i32* [[T_ADDR]], align 4
2309 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2310 // SIMD-ONLY1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
2311 // SIMD-ONLY1-NEXT:    [[TMP0:%.*]] = load %struct.S.0*, %struct.S.0** [[S_ADDR]], align 8
2312 // SIMD-ONLY1-NEXT:    [[F2:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[TMP0]], i32 0, i32 0
2313 // SIMD-ONLY1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[F2]], align 4
2314 // SIMD-ONLY1-NEXT:    [[TMP2:%.*]] = load i32, i32* [[T_ADDR]], align 4
2315 // SIMD-ONLY1-NEXT:    [[ADD:%.*]] = add nsw i32 [[TMP1]], [[TMP2]]
2316 // SIMD-ONLY1-NEXT:    store i32 [[ADD]], i32* [[F]], align 4
2317 // SIMD-ONLY1-NEXT:    ret void
2318 //
2319 //
2320 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ei
2321 // SIMD-ONLY1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2322 // SIMD-ONLY1-NEXT:  entry:
2323 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2324 // SIMD-ONLY1-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
2325 // SIMD-ONLY1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2326 // SIMD-ONLY1-NEXT:    store i32 [[A]], i32* [[A_ADDR]], align 4
2327 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2328 // SIMD-ONLY1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
2329 // SIMD-ONLY1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[A_ADDR]], align 4
2330 // SIMD-ONLY1-NEXT:    store i32 [[TMP0]], i32* [[F]], align 4
2331 // SIMD-ONLY1-NEXT:    ret void
2332 //
2333 //
2334 // SIMD-ONLY1-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
2335 // SIMD-ONLY1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
2336 // SIMD-ONLY1-NEXT:  entry:
2337 // SIMD-ONLY1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
2338 // SIMD-ONLY1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
2339 // SIMD-ONLY1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
2340 // SIMD-ONLY1-NEXT:    ret void
2341 //
2342 //
2343 // SIMD-ONLY2-LABEL: define {{[^@]+}}@main
2344 // SIMD-ONLY2-SAME: () #[[ATTR0:[0-9]+]] {
2345 // SIMD-ONLY2-NEXT:  entry:
2346 // SIMD-ONLY2-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
2347 // SIMD-ONLY2-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
2348 // SIMD-ONLY2-NEXT:    store i32 0, i32* [[RETVAL]], align 4
2349 // SIMD-ONLY2-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 1 dereferenceable(1) [[REF_TMP]])
2350 // SIMD-ONLY2-NEXT:    ret i32 0
2351 //
2352 //
2353 // SIMD-ONLY3-LABEL: define {{[^@]+}}@main
2354 // SIMD-ONLY3-SAME: () #[[ATTR1:[0-9]+]] {
2355 // SIMD-ONLY3-NEXT:  entry:
2356 // SIMD-ONLY3-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
2357 // SIMD-ONLY3-NEXT:    store i32 0, i32* [[RETVAL]], align 4
2358 // SIMD-ONLY3-NEXT:    [[TMP0:%.*]] = load i8*, i8** getelementptr inbounds ([[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to %struct.__block_literal_generic*), i32 0, i32 3), align 8
2359 // SIMD-ONLY3-NEXT:    [[TMP1:%.*]] = bitcast i8* [[TMP0]] to void (i8*)*
2360 // SIMD-ONLY3-NEXT:    call void [[TMP1]](i8* noundef bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to i8*))
2361 // SIMD-ONLY3-NEXT:    ret i32 0
2362 //
2363 //
2364 // SIMD-ONLY3-LABEL: define {{[^@]+}}@__main_block_invoke
2365 // SIMD-ONLY3-SAME: (i8* noundef [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR2:[0-9]+]] {
2366 // SIMD-ONLY3-NEXT:  entry:
2367 // SIMD-ONLY3-NEXT:    [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
2368 // SIMD-ONLY3-NEXT:    [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*, align 8
2369 // SIMD-ONLY3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2370 // SIMD-ONLY3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
2371 // SIMD-ONLY3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
2372 // SIMD-ONLY3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2373 // SIMD-ONLY3-NEXT:    [[I:%.*]] = alloca i32, align 4
2374 // SIMD-ONLY3-NEXT:    [[BLOCK2:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, align 8
2375 // SIMD-ONLY3-NEXT:    store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
2376 // SIMD-ONLY3-NEXT:    [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*
2377 // SIMD-ONLY3-NEXT:    store <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>** [[BLOCK_ADDR]], align 8
2378 // SIMD-ONLY3-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
2379 // SIMD-ONLY3-NEXT:    store i64 9, i64* [[DOTOMP_UB]], align 8
2380 // SIMD-ONLY3-NEXT:    [[TMP0:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
2381 // SIMD-ONLY3-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP0]] to i32
2382 // SIMD-ONLY3-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_IV]], align 4
2383 // SIMD-ONLY3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2384 // SIMD-ONLY3:       omp.inner.for.cond:
2385 // SIMD-ONLY3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]]
2386 // SIMD-ONLY3-NEXT:    [[CONV1:%.*]] = sext i32 [[TMP1]] to i64
2387 // SIMD-ONLY3-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP2]]
2388 // SIMD-ONLY3-NEXT:    [[CMP:%.*]] = icmp ule i64 [[CONV1]], [[TMP2]]
2389 // SIMD-ONLY3-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2390 // SIMD-ONLY3:       omp.inner.for.body:
2391 // SIMD-ONLY3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
2392 // SIMD-ONLY3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP3]], 1
2393 // SIMD-ONLY3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2394 // SIMD-ONLY3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
2395 // SIMD-ONLY3-NEXT:    store double 1.000000e+00, double* @g, align 8, !llvm.access.group [[ACC_GRP2]]
2396 // SIMD-ONLY3-NEXT:    store i32 11, i32* @_ZZ4mainE5sivar, align 4, !llvm.access.group [[ACC_GRP2]]
2397 // SIMD-ONLY3-NEXT:    [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK2]], i32 0, i32 0
2398 // SIMD-ONLY3-NEXT:    store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA]], align 8, !llvm.access.group [[ACC_GRP2]]
2399 // SIMD-ONLY3-NEXT:    [[BLOCK_FLAGS:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK2]], i32 0, i32 1
2400 // SIMD-ONLY3-NEXT:    store i32 1073741824, i32* [[BLOCK_FLAGS]], align 8, !llvm.access.group [[ACC_GRP2]]
2401 // SIMD-ONLY3-NEXT:    [[BLOCK_RESERVED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK2]], i32 0, i32 2
2402 // SIMD-ONLY3-NEXT:    store i32 0, i32* [[BLOCK_RESERVED]], align 4, !llvm.access.group [[ACC_GRP2]]
2403 // SIMD-ONLY3-NEXT:    [[BLOCK_INVOKE:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK2]], i32 0, i32 3
2404 // SIMD-ONLY3-NEXT:    store i8* bitcast (void (i8*)* @__main_block_invoke_2 to i8*), i8** [[BLOCK_INVOKE]], align 8, !llvm.access.group [[ACC_GRP2]]
2405 // SIMD-ONLY3-NEXT:    [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK2]], i32 0, i32 4
2406 // SIMD-ONLY3-NEXT:    store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp.1 to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8, !llvm.access.group [[ACC_GRP2]]
2407 // SIMD-ONLY3-NEXT:    [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK2]], i32 0, i32 5
2408 // SIMD-ONLY3-NEXT:    [[TMP4:%.*]] = load volatile double, double* @g, align 8, !llvm.access.group [[ACC_GRP2]]
2409 // SIMD-ONLY3-NEXT:    store volatile double [[TMP4]], double* [[BLOCK_CAPTURED]], align 8, !llvm.access.group [[ACC_GRP2]]
2410 // SIMD-ONLY3-NEXT:    [[BLOCK_CAPTURED3:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK2]], i32 0, i32 6
2411 // SIMD-ONLY3-NEXT:    [[TMP5:%.*]] = load i32, i32* @_ZZ4mainE5sivar, align 4, !llvm.access.group [[ACC_GRP2]]
2412 // SIMD-ONLY3-NEXT:    store i32 [[TMP5]], i32* [[BLOCK_CAPTURED3]], align 8, !llvm.access.group [[ACC_GRP2]]
2413 // SIMD-ONLY3-NEXT:    [[TMP6:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK2]] to void ()*
2414 // SIMD-ONLY3-NEXT:    [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP6]] to %struct.__block_literal_generic*
2415 // SIMD-ONLY3-NEXT:    [[TMP7:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
2416 // SIMD-ONLY3-NEXT:    [[TMP8:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
2417 // SIMD-ONLY3-NEXT:    [[TMP9:%.*]] = load i8*, i8** [[TMP7]], align 8, !llvm.access.group [[ACC_GRP2]]
2418 // SIMD-ONLY3-NEXT:    [[TMP10:%.*]] = bitcast i8* [[TMP9]] to void (i8*)*
2419 // SIMD-ONLY3-NEXT:    call void [[TMP10]](i8* noundef [[TMP8]]), !llvm.access.group [[ACC_GRP2]]
2420 // SIMD-ONLY3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2421 // SIMD-ONLY3:       omp.body.continue:
2422 // SIMD-ONLY3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2423 // SIMD-ONLY3:       omp.inner.for.inc:
2424 // SIMD-ONLY3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
2425 // SIMD-ONLY3-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP11]], 1
2426 // SIMD-ONLY3-NEXT:    store i32 [[ADD4]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
2427 // SIMD-ONLY3-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
2428 // SIMD-ONLY3:       omp.inner.for.end:
2429 // SIMD-ONLY3-NEXT:    store i32 10, i32* [[I]], align 4
2430 // SIMD-ONLY3-NEXT:    ret void
2431 //
2432 //
2433 // SIMD-ONLY3-LABEL: define {{[^@]+}}@__main_block_invoke_2
2434 // SIMD-ONLY3-SAME: (i8* noundef [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR2]] {
2435 // SIMD-ONLY3-NEXT:  entry:
2436 // SIMD-ONLY3-NEXT:    [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
2437 // SIMD-ONLY3-NEXT:    [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>*, align 8
2438 // SIMD-ONLY3-NEXT:    store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
2439 // SIMD-ONLY3-NEXT:    [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>*
2440 // SIMD-ONLY3-NEXT:    store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>** [[BLOCK_ADDR]], align 8
2441 // SIMD-ONLY3-NEXT:    [[BLOCK_CAPTURE_ADDR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK]], i32 0, i32 5
2442 // SIMD-ONLY3-NEXT:    store double 2.000000e+00, double* [[BLOCK_CAPTURE_ADDR]], align 8
2443 // SIMD-ONLY3-NEXT:    [[BLOCK_CAPTURE_ADDR1:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, double, i32 }>* [[BLOCK]], i32 0, i32 6
2444 // SIMD-ONLY3-NEXT:    store i32 22, i32* [[BLOCK_CAPTURE_ADDR1]], align 8
2445 // SIMD-ONLY3-NEXT:    ret void
2446 //
2447 //
2448 // SIMD-ONLY4-LABEL: define {{[^@]+}}@_Z10array_funciPfP2St
2449 // SIMD-ONLY4-SAME: (i32 noundef [[N:%.*]], float* noundef [[A:%.*]], %struct.St* noundef [[S:%.*]]) #[[ATTR0:[0-9]+]] {
2450 // SIMD-ONLY4-NEXT:  entry:
2451 // SIMD-ONLY4-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
2452 // SIMD-ONLY4-NEXT:    [[A_ADDR:%.*]] = alloca float*, align 8
2453 // SIMD-ONLY4-NEXT:    [[S_ADDR:%.*]] = alloca %struct.St*, align 8
2454 // SIMD-ONLY4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
2455 // SIMD-ONLY4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i64, align 8
2456 // SIMD-ONLY4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i64, align 8
2457 // SIMD-ONLY4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
2458 // SIMD-ONLY4-NEXT:    [[I:%.*]] = alloca i32, align 4
2459 // SIMD-ONLY4-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
2460 // SIMD-ONLY4-NEXT:    store float* [[A]], float** [[A_ADDR]], align 8
2461 // SIMD-ONLY4-NEXT:    store %struct.St* [[S]], %struct.St** [[S_ADDR]], align 8
2462 // SIMD-ONLY4-NEXT:    [[TMP0:%.*]] = load i32, i32* [[N_ADDR]], align 4
2463 // SIMD-ONLY4-NEXT:    [[TMP1:%.*]] = zext i32 [[TMP0]] to i64
2464 // SIMD-ONLY4-NEXT:    store i64 0, i64* [[DOTOMP_LB]], align 8
2465 // SIMD-ONLY4-NEXT:    store i64 9, i64* [[DOTOMP_UB]], align 8
2466 // SIMD-ONLY4-NEXT:    [[TMP2:%.*]] = load i64, i64* [[DOTOMP_LB]], align 8
2467 // SIMD-ONLY4-NEXT:    [[CONV:%.*]] = trunc i64 [[TMP2]] to i32
2468 // SIMD-ONLY4-NEXT:    store i32 [[CONV]], i32* [[DOTOMP_IV]], align 4
2469 // SIMD-ONLY4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
2470 // SIMD-ONLY4:       omp.inner.for.cond:
2471 // SIMD-ONLY4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2:![0-9]+]]
2472 // SIMD-ONLY4-NEXT:    [[CONV1:%.*]] = sext i32 [[TMP3]] to i64
2473 // SIMD-ONLY4-NEXT:    [[TMP4:%.*]] = load i64, i64* [[DOTOMP_UB]], align 8, !llvm.access.group [[ACC_GRP2]]
2474 // SIMD-ONLY4-NEXT:    [[CMP:%.*]] = icmp ule i64 [[CONV1]], [[TMP4]]
2475 // SIMD-ONLY4-NEXT:    br i1 [[CMP]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
2476 // SIMD-ONLY4:       omp.inner.for.body:
2477 // SIMD-ONLY4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
2478 // SIMD-ONLY4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP5]], 1
2479 // SIMD-ONLY4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
2480 // SIMD-ONLY4-NEXT:    store i32 [[ADD]], i32* [[I]], align 4, !llvm.access.group [[ACC_GRP2]]
2481 // SIMD-ONLY4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
2482 // SIMD-ONLY4:       omp.body.continue:
2483 // SIMD-ONLY4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
2484 // SIMD-ONLY4:       omp.inner.for.inc:
2485 // SIMD-ONLY4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
2486 // SIMD-ONLY4-NEXT:    [[ADD2:%.*]] = add nsw i32 [[TMP6]], 1
2487 // SIMD-ONLY4-NEXT:    store i32 [[ADD2]], i32* [[DOTOMP_IV]], align 4, !llvm.access.group [[ACC_GRP2]]
2488 // SIMD-ONLY4-NEXT:    br label [[OMP_INNER_FOR_COND]], !llvm.loop [[LOOP3:![0-9]+]]
2489 // SIMD-ONLY4:       omp.inner.for.end:
2490 // SIMD-ONLY4-NEXT:    store i32 10, i32* [[I]], align 4
2491 // SIMD-ONLY4-NEXT:    ret void
2492 //
2493