// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK3
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK4

// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c++ -std=c++11 -triple x86_64-apple-darwin10 -emit-pch -o %t %s
// RUN: %clang_cc1 -no-opaque-pointers -fopenmp-simd -x c++ -triple x86_64-apple-darwin10 -std=c++11 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -std=c++11 -DLAMBDA -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// RUN: %clang_cc1 -no-opaque-pointers -verify -fopenmp-simd -x c++ -fblocks -DBLOCKS -triple x86_64-apple-darwin10 -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
// expected-no-diagnostics
#ifndef HEADER
#define HEADER

template <class T>
struct S {
  T f;
  S(T a) : f(a) {}
  S() : f() {}
  S<T> &operator=(const S<T> &);
  operator T() { return T(); }
  ~S() {}
};

volatile int g = 1212;
float f;
char cnt;

template <typename T>
T tmain() {
  S<T> test;
  T *pvar = &test.f;
  T lvar = T();
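// No step is specified, so the linear step for pvar and lvar defaults to 1.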
#pragma omp parallel for linear(pvar, lvar)
  for (int i = 0; i < 2; ++i) {
    ++pvar, ++lvar;
  }
  return T();
}

int main() {
#ifdef LAMBDA
  [&]() {
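// Inside the lambda, the global g is a linear list item with an explicit step of 5.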
#pragma omp parallel for linear(g:5)
  for (int i = 0; i < 2; ++i) {
    g += 5;
    [&]() {
      g = 2;
    }();
  }
  }();
  return 0;
#elif defined(BLOCKS)
  ^{
#pragma omp parallel for linear(g:5)
  for (int i = 0; i < 2; ++i) {
    g += 5;
    g = 1;
    ^{
      g = 2;
    }();
  }
  }();
  return 0;
#else
  S<float> test;
  float *pvar = &test.f;
  long long lvar = 0;
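// Both pvar and lvar use an explicit linear step of 3.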
#pragma omp parallel for linear(pvar, lvar : 3)
  for (int i = 0; i < 2; ++i) {
    pvar += 3, lvar += 3;
  }
  return tmain<int>();
#endif
}




// Check for default initialization.



// Check for default initialization.
#endif

// CHECK1-LABEL: define {{[^@]+}}@main
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S:%.*]], align 4
// CHECK1-NEXT:    [[PVAR:%.*]] = alloca float*, align 8
// CHECK1-NEXT:    [[LVAR:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    store i32 0, i32* [[RETVAL]], align 4
// CHECK1-NEXT:    call void @_ZN1SIfEC1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[TEST]])
// CHECK1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S]], %struct.S* [[TEST]], i32 0, i32 0
// CHECK1-NEXT:    store float* [[F]], float** [[PVAR]], align 8
// CHECK1-NEXT:    store i64 0, i64* [[LVAR]], align 8
// CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, float**, i64*)* @.omp_outlined. to void (i32*, i32*, ...)*), float** [[PVAR]], i64* [[LVAR]])
// CHECK1-NEXT:    [[CALL:%.*]] = call noundef i32 @_Z5tmainIiET_v()
// CHECK1-NEXT:    store i32 [[CALL]], i32* [[RETVAL]], align 4
// CHECK1-NEXT:    call void @_ZN1SIfED1Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4:[0-9]+]]
// CHECK1-NEXT:    [[TMP0:%.*]] = load i32, i32* [[RETVAL]], align 4
// CHECK1-NEXT:    ret i32 [[TMP0]]
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC1Ev
// CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] align 2 {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    call void @_ZN1SIfEC2Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], float** noundef nonnull align 8 dereferenceable(8) [[PVAR:%.*]], i64* noundef nonnull align 8 dereferenceable(8) [[LVAR:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[PVAR_ADDR:%.*]] = alloca float**, align 8
// CHECK1-NEXT:    [[LVAR_ADDR:%.*]] = alloca i64*, align 8
// CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTLINEAR_START:%.*]] = alloca float*, align 8
// CHECK1-NEXT:    [[DOTLINEAR_START1:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[PVAR2:%.*]] = alloca float*, align 8
// CHECK1-NEXT:    [[LVAR3:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT:    store float** [[PVAR]], float*** [[PVAR_ADDR]], align 8
// CHECK1-NEXT:    store i64* [[LVAR]], i64** [[LVAR_ADDR]], align 8
// CHECK1-NEXT:    [[TMP0:%.*]] = load float**, float*** [[PVAR_ADDR]], align 8
// CHECK1-NEXT:    [[TMP1:%.*]] = load i64*, i64** [[LVAR_ADDR]], align 8
// CHECK1-NEXT:    [[TMP2:%.*]] = load float*, float** [[TMP0]], align 8
// CHECK1-NEXT:    store float* [[TMP2]], float** [[DOTLINEAR_START]], align 8
// CHECK1-NEXT:    [[TMP3:%.*]] = load i64, i64* [[TMP1]], align 8
// CHECK1-NEXT:    store i64 [[TMP3]], i64* [[DOTLINEAR_START1]], align 8
// CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK1-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP5]])
// CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 1
// CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1:       cond.true:
// CHECK1-NEXT:    br label [[COND_END:%.*]]
// CHECK1:       cond.false:
// CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    br label [[COND_END]]
// CHECK1:       cond.end:
// CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1:       omp.inner.for.cond:
// CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK1-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1:       omp.inner.for.body:
// CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
// CHECK1-NEXT:    [[TMP12:%.*]] = load float*, float** [[DOTLINEAR_START]], align 8
// CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[MUL5:%.*]] = mul nsw i32 [[TMP13]], 3
// CHECK1-NEXT:    [[IDX_EXT:%.*]] = sext i32 [[MUL5]] to i64
// CHECK1-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[IDX_EXT]]
// CHECK1-NEXT:    store float* [[ADD_PTR]], float** [[PVAR2]], align 8
// CHECK1-NEXT:    [[TMP14:%.*]] = load i64, i64* [[DOTLINEAR_START1]], align 8
// CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[MUL6:%.*]] = mul nsw i32 [[TMP15]], 3
// CHECK1-NEXT:    [[CONV:%.*]] = sext i32 [[MUL6]] to i64
// CHECK1-NEXT:    [[ADD7:%.*]] = add nsw i64 [[TMP14]], [[CONV]]
// CHECK1-NEXT:    store i64 [[ADD7]], i64* [[LVAR3]], align 8
// CHECK1-NEXT:    [[TMP16:%.*]] = load float*, float** [[PVAR2]], align 8
// CHECK1-NEXT:    [[ADD_PTR8:%.*]] = getelementptr inbounds float, float* [[TMP16]], i64 3
// CHECK1-NEXT:    store float* [[ADD_PTR8]], float** [[PVAR2]], align 8
// CHECK1-NEXT:    [[TMP17:%.*]] = load i64, i64* [[LVAR3]], align 8
// CHECK1-NEXT:    [[ADD9:%.*]] = add nsw i64 [[TMP17]], 3
// CHECK1-NEXT:    store i64 [[ADD9]], i64* [[LVAR3]], align 8
// CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1:       omp.body.continue:
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1:       omp.inner.for.inc:
// CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[ADD10:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK1-NEXT:    store i32 [[ADD10]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
// CHECK1:       omp.inner.for.end:
// CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1:       omp.loop.exit:
// CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]])
// CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP20:%.*]] = icmp ne i32 [[TMP19]], 0
// CHECK1-NEXT:    br i1 [[TMP20]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK1:       .omp.linear.pu:
// CHECK1-NEXT:    [[TMP21:%.*]] = load float*, float** [[PVAR2]], align 8
// CHECK1-NEXT:    store float* [[TMP21]], float** [[TMP0]], align 8
// CHECK1-NEXT:    [[TMP22:%.*]] = load i64, i64* [[LVAR3]], align 8
// CHECK1-NEXT:    store i64 [[TMP22]], i64* [[TMP1]], align 8
// CHECK1-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK1:       .omp.linear.pu.done:
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z5tmainIiET_v
// CHECK1-SAME: () #[[ATTR5:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[TEST:%.*]] = alloca [[STRUCT_S_0:%.*]], align 4
// CHECK1-NEXT:    [[PVAR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[LVAR:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    call void @_ZN1SIiEC1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]])
// CHECK1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0]], %struct.S.0* [[TEST]], i32 0, i32 0
// CHECK1-NEXT:    store i32* [[F]], i32** [[PVAR]], align 8
// CHECK1-NEXT:    store i32 0, i32* [[LVAR]], align 4
// CHECK1-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3]], i32 2, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32**, i32*)* @.omp_outlined..1 to void (i32*, i32*, ...)*), i32** [[PVAR]], i32* [[LVAR]])
// CHECK1-NEXT:    call void @_ZN1SIiED1Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[TEST]]) #[[ATTR4]]
// CHECK1-NEXT:    ret i32 0
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED1Ev
// CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    call void @_ZN1SIfED2Ev(%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfEC2Ev
// CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S:%.*]], %struct.S* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT:    store float 0.000000e+00, float* [[F]], align 4
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC1Ev
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    call void @_ZN1SIiEC2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]])
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32** noundef nonnull align 8 dereferenceable(8) [[PVAR:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[LVAR:%.*]]) #[[ATTR2]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[PVAR_ADDR:%.*]] = alloca i32**, align 8
// CHECK1-NEXT:    [[LVAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTLINEAR_START1:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[PVAR2:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[LVAR3:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32** [[PVAR]], i32*** [[PVAR_ADDR]], align 8
// CHECK1-NEXT:    store i32* [[LVAR]], i32** [[LVAR_ADDR]], align 8
// CHECK1-NEXT:    [[TMP0:%.*]] = load i32**, i32*** [[PVAR_ADDR]], align 8
// CHECK1-NEXT:    [[TMP1:%.*]] = load i32*, i32** [[LVAR_ADDR]], align 8
// CHECK1-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[TMP0]], align 8
// CHECK1-NEXT:    store i32* [[TMP2]], i32** [[DOTLINEAR_START]], align 8
// CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK1-NEXT:    store i32 [[TMP3]], i32* [[DOTLINEAR_START1]], align 4
// CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK1-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP4:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    [[TMP5:%.*]] = load i32, i32* [[TMP4]], align 4
// CHECK1-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1]], i32 [[TMP5]])
// CHECK1-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP6]], 1
// CHECK1-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK1:       cond.true:
// CHECK1-NEXT:    br label [[COND_END:%.*]]
// CHECK1:       cond.false:
// CHECK1-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    br label [[COND_END]]
// CHECK1:       cond.end:
// CHECK1-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
// CHECK1-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK1-NEXT:    store i32 [[TMP8]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK1:       omp.inner.for.cond:
// CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK1-NEXT:    [[CMP4:%.*]] = icmp sle i32 [[TMP9]], [[TMP10]]
// CHECK1-NEXT:    br i1 [[CMP4]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK1:       omp.inner.for.body:
// CHECK1-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP11]], 1
// CHECK1-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK1-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
// CHECK1-NEXT:    [[TMP12:%.*]] = load i32*, i32** [[DOTLINEAR_START]], align 8
// CHECK1-NEXT:    [[TMP13:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[MUL5:%.*]] = mul nsw i32 [[TMP13]], 1
// CHECK1-NEXT:    [[IDX_EXT:%.*]] = sext i32 [[MUL5]] to i64
// CHECK1-NEXT:    [[ADD_PTR:%.*]] = getelementptr inbounds i32, i32* [[TMP12]], i64 [[IDX_EXT]]
// CHECK1-NEXT:    store i32* [[ADD_PTR]], i32** [[PVAR2]], align 8
// CHECK1-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTLINEAR_START1]], align 4
// CHECK1-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[MUL6:%.*]] = mul nsw i32 [[TMP15]], 1
// CHECK1-NEXT:    [[ADD7:%.*]] = add nsw i32 [[TMP14]], [[MUL6]]
// CHECK1-NEXT:    store i32 [[ADD7]], i32* [[LVAR3]], align 4
// CHECK1-NEXT:    [[TMP16:%.*]] = load i32*, i32** [[PVAR2]], align 8
// CHECK1-NEXT:    [[INCDEC_PTR:%.*]] = getelementptr inbounds i32, i32* [[TMP16]], i32 1
// CHECK1-NEXT:    store i32* [[INCDEC_PTR]], i32** [[PVAR2]], align 8
// CHECK1-NEXT:    [[TMP17:%.*]] = load i32, i32* [[LVAR3]], align 4
// CHECK1-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP17]], 1
// CHECK1-NEXT:    store i32 [[INC]], i32* [[LVAR3]], align 4
// CHECK1-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK1:       omp.body.continue:
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK1:       omp.inner.for.inc:
// CHECK1-NEXT:    [[TMP18:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    [[ADD8:%.*]] = add nsw i32 [[TMP18]], 1
// CHECK1-NEXT:    store i32 [[ADD8]], i32* [[DOTOMP_IV]], align 4
// CHECK1-NEXT:    br label [[OMP_INNER_FOR_COND]]
// CHECK1:       omp.inner.for.end:
// CHECK1-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK1:       omp.loop.exit:
// CHECK1-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP5]])
// CHECK1-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK1-NEXT:    [[TMP20:%.*]] = icmp ne i32 [[TMP19]], 0
// CHECK1-NEXT:    br i1 [[TMP20]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK1:       .omp.linear.pu:
// CHECK1-NEXT:    [[TMP21:%.*]] = load i32*, i32** [[PVAR2]], align 8
// CHECK1-NEXT:    store i32* [[TMP21]], i32** [[TMP0]], align 8
// CHECK1-NEXT:    [[TMP22:%.*]] = load i32, i32* [[LVAR3]], align 4
// CHECK1-NEXT:    store i32 [[TMP22]], i32* [[TMP1]], align 4
// CHECK1-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK1:       .omp.linear.pu.done:
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED1Ev
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    call void @_ZN1SIiED2Ev(%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]]
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiEC2Ev
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    [[F:%.*]] = getelementptr inbounds [[STRUCT_S_0:%.*]], %struct.S.0* [[THIS1]], i32 0, i32 0
// CHECK1-NEXT:    store i32 0, i32* [[F]], align 4
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIiED2Ev
// CHECK1-SAME: (%struct.S.0* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S.0*, align 8
// CHECK1-NEXT:    store %struct.S.0* [[THIS]], %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S.0*, %struct.S.0** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@_ZN1SIfED2Ev
// CHECK1-SAME: (%struct.S* noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR1]] align 2 {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[THIS_ADDR:%.*]] = alloca %struct.S*, align 8
// CHECK1-NEXT:    store %struct.S* [[THIS]], %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    [[THIS1:%.*]] = load %struct.S*, %struct.S** [[THIS_ADDR]], align 8
// CHECK1-NEXT:    ret void
//
//
// CHECK3-LABEL: define {{[^@]+}}@main
// CHECK3-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON:%.*]], align 1
// CHECK3-NEXT:    store i32 0, i32* [[RETVAL]], align 4
// CHECK3-NEXT:    call void @"_ZZ4mainENK3$_0clEv"(%class.anon* noundef nonnull align 1 dereferenceable(1) [[REF_TMP]])
// CHECK3-NEXT:    ret i32 0
//
//
// CHECK3-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK3-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[G:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK3-NEXT:  entry:
// CHECK3-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT:    [[G_ADDR:%.*]] = alloca i32*, align 8
// CHECK3-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[G1:%.*]] = alloca i32, align 4
// CHECK3-NEXT:    [[REF_TMP:%.*]] = alloca [[CLASS_ANON_0:%.*]], align 8
// CHECK3-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK3-NEXT:    store i32* [[G]], i32** [[G_ADDR]], align 8
// CHECK3-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[G_ADDR]], align 8
// CHECK3-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK3-NEXT:    store i32 [[TMP1]], i32* [[DOTLINEAR_START]], align 4
// CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK3-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK3-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK3-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP3]])
// CHECK3-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK3-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1
// CHECK3-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK3:       cond.true:
// CHECK3-NEXT:    br label [[COND_END:%.*]]
// CHECK3:       cond.false:
// CHECK3-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT:    br label [[COND_END]]
// CHECK3:       cond.end:
// CHECK3-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK3-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK3-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK3:       omp.inner.for.cond:
// CHECK3-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK3-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK3-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK3:       omp.inner.for.body:
// CHECK3-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
// CHECK3-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK3-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
// CHECK3-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK3-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT:    [[MUL3:%.*]] = mul nsw i32 [[TMP11]], 5
// CHECK3-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP10]], [[MUL3]]
// CHECK3-NEXT:    store i32 [[ADD4]], i32* [[G1]], align 4
// CHECK3-NEXT:    [[TMP12:%.*]] = load i32, i32* [[G1]], align 4
// CHECK3-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP12]], 5
// CHECK3-NEXT:    store i32 [[ADD5]], i32* [[G1]], align 4
// CHECK3-NEXT:    [[TMP13:%.*]] = getelementptr inbounds [[CLASS_ANON_0]], %class.anon.0* [[REF_TMP]], i32 0, i32 0
// CHECK3-NEXT:    store i32* [[G1]], i32** [[TMP13]], align 8
// CHECK3-NEXT:    call void @"_ZZZ4mainENK3$_0clEvENKUlvE_clEv"(%class.anon.0* noundef nonnull align 8 dereferenceable(8) [[REF_TMP]])
// CHECK3-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK3:       omp.body.continue:
// CHECK3-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK3:       omp.inner.for.inc:
// CHECK3-NEXT:    [[TMP14:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP14]], 1
// CHECK3-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
// CHECK3-NEXT:    br label [[OMP_INNER_FOR_COND]]
// CHECK3:       omp.inner.for.end:
// CHECK3-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK3:       omp.loop.exit:
// CHECK3-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]])
// CHECK3-NEXT:    [[TMP15:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK3-NEXT:    [[TMP16:%.*]] = icmp ne i32 [[TMP15]], 0
// CHECK3-NEXT:    br i1 [[TMP16]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK3:       .omp.linear.pu:
// CHECK3-NEXT:    [[TMP17:%.*]] = load i32, i32* [[G1]], align 4
// CHECK3-NEXT:    store i32 [[TMP17]], i32* [[TMP0]], align 4
// CHECK3-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK3:       .omp.linear.pu.done:
// CHECK3-NEXT:    ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@main
// CHECK4-SAME: () #[[ATTR1:[0-9]+]] {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[RETVAL:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    store i32 0, i32* [[RETVAL]], align 4
// CHECK4-NEXT:    [[TMP0:%.*]] = load i8*, i8** getelementptr inbounds ([[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to %struct.__block_literal_generic*), i32 0, i32 3), align 8
// CHECK4-NEXT:    [[TMP1:%.*]] = bitcast i8* [[TMP0]] to void (i8*)*
// CHECK4-NEXT:    call void [[TMP1]](i8* noundef bitcast ({ i8**, i32, i32, i8*, %struct.__block_descriptor* }* @__block_literal_global to i8*))
// CHECK4-NEXT:    ret i32 0
//
//
// CHECK4-LABEL: define {{[^@]+}}@__main_block_invoke
// CHECK4-SAME: (i8* noundef [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR2:[0-9]+]] {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT:    [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*, align 8
// CHECK4-NEXT:    store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
// CHECK4-NEXT:    [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>*
// CHECK4-NEXT:    store <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor* }>** [[BLOCK_ADDR]], align 8
// CHECK4-NEXT:    call void (%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) @__kmpc_fork_call(%struct.ident_t* @[[GLOB3:[0-9]+]], i32 1, void (i32*, i32*, ...)* bitcast (void (i32*, i32*, i32*)* @.omp_outlined. to void (i32*, i32*, ...)*), i32* @g)
// CHECK4-NEXT:    ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK4-SAME: (i32* noalias noundef [[DOTGLOBAL_TID_:%.*]], i32* noalias noundef [[DOTBOUND_TID_:%.*]], i32* noundef nonnull align 4 dereferenceable(4) [[G:%.*]]) #[[ATTR3:[0-9]+]] {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT:    [[G_ADDR:%.*]] = alloca i32*, align 8
// CHECK4-NEXT:    [[DOTOMP_IV:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[TMP:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[DOTLINEAR_START:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[I:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[G1:%.*]] = alloca i32, align 4
// CHECK4-NEXT:    [[BLOCK:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, align 8
// CHECK4-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK4-NEXT:    store i32* [[G]], i32** [[G_ADDR]], align 8
// CHECK4-NEXT:    [[TMP0:%.*]] = load i32*, i32** [[G_ADDR]], align 8
// CHECK4-NEXT:    [[TMP1:%.*]] = load i32, i32* [[TMP0]], align 4
// CHECK4-NEXT:    store i32 [[TMP1]], i32* [[DOTLINEAR_START]], align 4
// CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT:    store i32 1, i32* [[DOTOMP_STRIDE]], align 4
// CHECK4-NEXT:    store i32 0, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT:    [[TMP2:%.*]] = load i32*, i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK4-NEXT:    [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK4-NEXT:    call void @__kmpc_barrier(%struct.ident_t* @[[GLOB1:[0-9]+]], i32 [[TMP3]])
// CHECK4-NEXT:    call void @__kmpc_for_static_init_4(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[TMP3]], i32 34, i32* [[DOTOMP_IS_LAST]], i32* [[DOTOMP_LB]], i32* [[DOTOMP_UB]], i32* [[DOTOMP_STRIDE]], i32 1, i32 1)
// CHECK4-NEXT:    [[TMP4:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP4]], 1
// CHECK4-NEXT:    br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK4:       cond.true:
// CHECK4-NEXT:    br label [[COND_END:%.*]]
// CHECK4:       cond.false:
// CHECK4-NEXT:    [[TMP5:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT:    br label [[COND_END]]
// CHECK4:       cond.end:
// CHECK4-NEXT:    [[COND:%.*]] = phi i32 [ 1, [[COND_TRUE]] ], [ [[TMP5]], [[COND_FALSE]] ]
// CHECK4-NEXT:    store i32 [[COND]], i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT:    [[TMP6:%.*]] = load i32, i32* [[DOTOMP_LB]], align 4
// CHECK4-NEXT:    store i32 [[TMP6]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
// CHECK4:       omp.inner.for.cond:
// CHECK4-NEXT:    [[TMP7:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT:    [[TMP8:%.*]] = load i32, i32* [[DOTOMP_UB]], align 4
// CHECK4-NEXT:    [[CMP2:%.*]] = icmp sle i32 [[TMP7]], [[TMP8]]
// CHECK4-NEXT:    br i1 [[CMP2]], label [[OMP_INNER_FOR_BODY:%.*]], label [[OMP_INNER_FOR_END:%.*]]
// CHECK4:       omp.inner.for.body:
// CHECK4-NEXT:    [[TMP9:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT:    [[MUL:%.*]] = mul nsw i32 [[TMP9]], 1
// CHECK4-NEXT:    [[ADD:%.*]] = add nsw i32 0, [[MUL]]
// CHECK4-NEXT:    store i32 [[ADD]], i32* [[I]], align 4
// CHECK4-NEXT:    [[TMP10:%.*]] = load i32, i32* [[DOTLINEAR_START]], align 4
// CHECK4-NEXT:    [[TMP11:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT:    [[MUL3:%.*]] = mul nsw i32 [[TMP11]], 5
// CHECK4-NEXT:    [[ADD4:%.*]] = add nsw i32 [[TMP10]], [[MUL3]]
// CHECK4-NEXT:    store i32 [[ADD4]], i32* [[G1]], align 4
// CHECK4-NEXT:    [[TMP12:%.*]] = load i32, i32* [[G1]], align 4
// CHECK4-NEXT:    [[ADD5:%.*]] = add nsw i32 [[TMP12]], 5
// CHECK4-NEXT:    store i32 [[ADD5]], i32* [[G1]], align 4
// CHECK4-NEXT:    store i32 1, i32* [[G1]], align 4
// CHECK4-NEXT:    [[BLOCK_ISA:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 0
// CHECK4-NEXT:    store i8* bitcast (i8** @_NSConcreteStackBlock to i8*), i8** [[BLOCK_ISA]], align 8
// CHECK4-NEXT:    [[BLOCK_FLAGS:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 1
// CHECK4-NEXT:    store i32 1073741824, i32* [[BLOCK_FLAGS]], align 8
// CHECK4-NEXT:    [[BLOCK_RESERVED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 2
// CHECK4-NEXT:    store i32 0, i32* [[BLOCK_RESERVED]], align 4
// CHECK4-NEXT:    [[BLOCK_INVOKE:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 3
// CHECK4-NEXT:    store i8* bitcast (void (i8*)* @g_block_invoke to i8*), i8** [[BLOCK_INVOKE]], align 8
// CHECK4-NEXT:    [[BLOCK_DESCRIPTOR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 4
// CHECK4-NEXT:    store %struct.__block_descriptor* bitcast ({ i64, i64, i8*, i8* }* @__block_descriptor_tmp.1 to %struct.__block_descriptor*), %struct.__block_descriptor** [[BLOCK_DESCRIPTOR]], align 8
// CHECK4-NEXT:    [[BLOCK_CAPTURED:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 5
// CHECK4-NEXT:    [[TMP13:%.*]] = load volatile i32, i32* [[G1]], align 4
// CHECK4-NEXT:    store volatile i32 [[TMP13]], i32* [[BLOCK_CAPTURED]], align 8
// CHECK4-NEXT:    [[TMP14:%.*]] = bitcast <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]] to void ()*
// CHECK4-NEXT:    [[BLOCK_LITERAL:%.*]] = bitcast void ()* [[TMP14]] to %struct.__block_literal_generic*
// CHECK4-NEXT:    [[TMP15:%.*]] = getelementptr inbounds [[STRUCT___BLOCK_LITERAL_GENERIC:%.*]], %struct.__block_literal_generic* [[BLOCK_LITERAL]], i32 0, i32 3
// CHECK4-NEXT:    [[TMP16:%.*]] = bitcast %struct.__block_literal_generic* [[BLOCK_LITERAL]] to i8*
// CHECK4-NEXT:    [[TMP17:%.*]] = load i8*, i8** [[TMP15]], align 8
// CHECK4-NEXT:    [[TMP18:%.*]] = bitcast i8* [[TMP17]] to void (i8*)*
// CHECK4-NEXT:    call void [[TMP18]](i8* noundef [[TMP16]])
// CHECK4-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
// CHECK4:       omp.body.continue:
// CHECK4-NEXT:    br label [[OMP_INNER_FOR_INC:%.*]]
// CHECK4:       omp.inner.for.inc:
// CHECK4-NEXT:    [[TMP19:%.*]] = load i32, i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT:    [[ADD6:%.*]] = add nsw i32 [[TMP19]], 1
// CHECK4-NEXT:    store i32 [[ADD6]], i32* [[DOTOMP_IV]], align 4
// CHECK4-NEXT:    br label [[OMP_INNER_FOR_COND]]
// CHECK4:       omp.inner.for.end:
// CHECK4-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
// CHECK4:       omp.loop.exit:
// CHECK4-NEXT:    call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB2]], i32 [[TMP3]])
// CHECK4-NEXT:    [[TMP20:%.*]] = load i32, i32* [[DOTOMP_IS_LAST]], align 4
// CHECK4-NEXT:    [[TMP21:%.*]] = icmp ne i32 [[TMP20]], 0
// CHECK4-NEXT:    br i1 [[TMP21]], label [[DOTOMP_LINEAR_PU:%.*]], label [[DOTOMP_LINEAR_PU_DONE:%.*]]
// CHECK4:       .omp.linear.pu:
// CHECK4-NEXT:    [[TMP22:%.*]] = load i32, i32* [[G1]], align 4
// CHECK4-NEXT:    store i32 [[TMP22]], i32* [[TMP0]], align 4
// CHECK4-NEXT:    br label [[DOTOMP_LINEAR_PU_DONE]]
// CHECK4:       .omp.linear.pu.done:
// CHECK4-NEXT:    ret void
//
//
// CHECK4-LABEL: define {{[^@]+}}@g_block_invoke
// CHECK4-SAME: (i8* noundef [[DOTBLOCK_DESCRIPTOR:%.*]]) #[[ATTR2]] {
// CHECK4-NEXT:  entry:
// CHECK4-NEXT:    [[DOTBLOCK_DESCRIPTOR_ADDR:%.*]] = alloca i8*, align 8
// CHECK4-NEXT:    [[BLOCK_ADDR:%.*]] = alloca <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>*, align 8
// CHECK4-NEXT:    store i8* [[DOTBLOCK_DESCRIPTOR]], i8** [[DOTBLOCK_DESCRIPTOR_ADDR]], align 8
// CHECK4-NEXT:    [[BLOCK:%.*]] = bitcast i8* [[DOTBLOCK_DESCRIPTOR]] to <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>*
// CHECK4-NEXT:    store <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>** [[BLOCK_ADDR]], align 8
// CHECK4-NEXT:    [[BLOCK_CAPTURE_ADDR:%.*]] = getelementptr inbounds <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>, <{ i8*, i32, i32, i8*, %struct.__block_descriptor*, i32 }>* [[BLOCK]], i32 0, i32 5
// CHECK4-NEXT:    store i32 2, i32* [[BLOCK_CAPTURE_ADDR]], align 8
// CHECK4-NEXT:    ret void
//