; xref: /llvm-project/llvm/test/Transforms/SCCP/openmp_parallel_for.ll (revision 0991da36906bdb29b8f1030d5123bafb58fee79e)
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=ipsccp < %s | FileCheck %s
;
;    void bar(int, float, double);
;
;    void foo(int N) {
;      float p = 3;
;      double q = 5;
;      N = 7;
;
;    #pragma omp parallel for firstprivate(q)
;      for (int i = 2; i < N; i++) {
;        bar(i, p, q);
;      }
;    }
;
; Verify the constant value of q is propagated into the outlined function.
;
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"

; OpenMP runtime source-location descriptor type and two instances of it
; (@0 for the worksharing-loop calls, @1 for the fork call).
%struct.ident_t = type { i32, i32, i32, i32, ptr }

@.str = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00", align 1
@0 = private unnamed_addr global %struct.ident_t { i32 0, i32 514, i32 0, i32 0, ptr @.str }, align 8
@1 = private unnamed_addr global %struct.ident_t { i32 0, i32 2, i32 0, i32 0, ptr @.str }, align 8

; Caller: forks the outlined parallel region via __kmpc_fork_call.
; The trailing i64 4617315517961601024 is the bit pattern of double 5.0
; (the firstprivate q), passed by value through the variadic fork call.
define dso_local void @foo(i32 %N) {
; CHECK-LABEL: @foo(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[P:%.*]] = alloca float, align 4
; CHECK-NEXT:    store i32 [[N:%.*]], ptr [[N_ADDR]], align 4
; CHECK-NEXT:    store float 3.000000e+00, ptr [[P]], align 4
; CHECK-NEXT:    store i32 7, ptr [[N_ADDR]], align 4
; CHECK-NEXT:    call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @1, i32 3, ptr @.omp_outlined., ptr nonnull [[N_ADDR]], ptr nonnull [[P]], i64 4617315517961601024)
; CHECK-NEXT:    ret void
;
entry:
  %N.addr = alloca i32, align 4
  %p = alloca float, align 4
  store i32 %N, ptr %N.addr, align 4
  store float 3.000000e+00, ptr %p, align 4
  store i32 7, ptr %N.addr, align 4
  call void (ptr, i32, ptr, ...) @__kmpc_fork_call(ptr nonnull @1, i32 3, ptr @.omp_outlined., ptr nonnull %N.addr, ptr nonnull %p, i64 4617315517961601024)
  ret void
}

; Outlined parallel region: a statically scheduled worksharing loop calling
; @bar(i, p, q). %q carries the bit pattern of double 5.0; the test expects
; IPSCCP to keep it live so the load from %q.addr feeds @bar directly.
define internal void @.omp_outlined.(ptr noalias %.global_tid., ptr noalias %.bound_tid., ptr dereferenceable(4) %N, ptr dereferenceable(4) %p, i64 %q) {
; CHECK-LABEL: @.omp_outlined.(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[Q_ADDR:%.*]] = alloca i64, align 8
; CHECK-NEXT:    [[DOTOMP_LB:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[DOTOMP_UB:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[DOTOMP_STRIDE:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[DOTOMP_IS_LAST:%.*]] = alloca i32, align 4
; CHECK-NEXT:    store i64 [[Q:%.*]], ptr [[Q_ADDR]], align 8
; CHECK-NEXT:    [[TMP:%.*]] = load i32, ptr [[N:%.*]], align 4
; CHECK-NEXT:    [[SUB3:%.*]] = add nsw i32 [[TMP]], -3
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[TMP]], 2
; CHECK-NEXT:    br i1 [[CMP]], label [[OMP_PRECOND_THEN:%.*]], label [[OMP_PRECOND_END:%.*]]
; CHECK:       omp.precond.then:
; CHECK-NEXT:    store i32 0, ptr [[DOTOMP_LB]], align 4
; CHECK-NEXT:    store i32 [[SUB3]], ptr [[DOTOMP_UB]], align 4
; CHECK-NEXT:    store i32 1, ptr [[DOTOMP_STRIDE]], align 4
; CHECK-NEXT:    store i32 0, ptr [[DOTOMP_IS_LAST]], align 4
; CHECK-NEXT:    [[TMP5:%.*]] = load i32, ptr [[DOTGLOBAL_TID_:%.*]], align 4
; CHECK-NEXT:    call void @__kmpc_for_static_init_4(ptr nonnull @0, i32 [[TMP5]], i32 34, ptr nonnull [[DOTOMP_IS_LAST]], ptr nonnull [[DOTOMP_LB]], ptr nonnull [[DOTOMP_UB]], ptr nonnull [[DOTOMP_STRIDE]], i32 1, i32 1)
; CHECK-NEXT:    [[TMP6:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
; CHECK-NEXT:    [[CMP6:%.*]] = icmp sgt i32 [[TMP6]], [[SUB3]]
; CHECK-NEXT:    br i1 [[CMP6]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
; CHECK:       cond.true:
; CHECK-NEXT:    br label [[COND_END:%.*]]
; CHECK:       cond.false:
; CHECK-NEXT:    [[TMP7:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
; CHECK-NEXT:    br label [[COND_END]]
; CHECK:       cond.end:
; CHECK-NEXT:    [[COND:%.*]] = phi i32 [ [[SUB3]], [[COND_TRUE]] ], [ [[TMP7]], [[COND_FALSE]] ]
; CHECK-NEXT:    store i32 [[COND]], ptr [[DOTOMP_UB]], align 4
; CHECK-NEXT:    [[TMP8:%.*]] = load i32, ptr [[DOTOMP_LB]], align 4
; CHECK-NEXT:    br label [[OMP_INNER_FOR_COND:%.*]]
; CHECK:       omp.inner.for.cond:
; CHECK-NEXT:    [[DOTOMP_IV_0:%.*]] = phi i32 [ [[TMP8]], [[COND_END]] ], [ [[ADD11:%.*]], [[OMP_INNER_FOR_INC:%.*]] ]
; CHECK-NEXT:    [[TMP9:%.*]] = load i32, ptr [[DOTOMP_UB]], align 4
; CHECK-NEXT:    [[CMP8:%.*]] = icmp sgt i32 [[DOTOMP_IV_0]], [[TMP9]]
; CHECK-NEXT:    br i1 [[CMP8]], label [[OMP_INNER_FOR_COND_CLEANUP:%.*]], label [[OMP_INNER_FOR_BODY:%.*]]
; CHECK:       omp.inner.for.cond.cleanup:
; CHECK-NEXT:    br label [[OMP_INNER_FOR_END:%.*]]
; CHECK:       omp.inner.for.body:
; CHECK-NEXT:    [[ADD10:%.*]] = add nsw i32 [[DOTOMP_IV_0]], 2
; CHECK-NEXT:    [[TMP10:%.*]] = load float, ptr [[P:%.*]], align 4
; CHECK-NEXT:    [[TMP11:%.*]] = load double, ptr [[Q_ADDR]], align 8
; CHECK-NEXT:    call void @bar(i32 [[ADD10]], float [[TMP10]], double [[TMP11]])
; CHECK-NEXT:    br label [[OMP_BODY_CONTINUE:%.*]]
; CHECK:       omp.body.continue:
; CHECK-NEXT:    br label [[OMP_INNER_FOR_INC]]
; CHECK:       omp.inner.for.inc:
; CHECK-NEXT:    [[ADD11]] = add nsw i32 [[DOTOMP_IV_0]], 1
; CHECK-NEXT:    br label [[OMP_INNER_FOR_COND]]
; CHECK:       omp.inner.for.end:
; CHECK-NEXT:    br label [[OMP_LOOP_EXIT:%.*]]
; CHECK:       omp.loop.exit:
; CHECK-NEXT:    [[TMP12:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4
; CHECK-NEXT:    call void @__kmpc_for_static_fini(ptr nonnull @0, i32 [[TMP12]])
; CHECK-NEXT:    br label [[OMP_PRECOND_END]]
; CHECK:       omp.precond.end:
; CHECK-NEXT:    ret void
;
entry:
  %q.addr = alloca i64, align 8
  %.omp.lb = alloca i32, align 4
  %.omp.ub = alloca i32, align 4
  %.omp.stride = alloca i32, align 4
  %.omp.is_last = alloca i32, align 4
  store i64 %q, ptr %q.addr, align 8
  %tmp = load i32, ptr %N, align 4
  %sub3 = add nsw i32 %tmp, -3
  %cmp = icmp sgt i32 %tmp, 2
  br i1 %cmp, label %omp.precond.then, label %omp.precond.end

omp.precond.then:                                 ; preds = %entry
  store i32 0, ptr %.omp.lb, align 4
  store i32 %sub3, ptr %.omp.ub, align 4
  store i32 1, ptr %.omp.stride, align 4
  store i32 0, ptr %.omp.is_last, align 4
  %tmp5 = load i32, ptr %.global_tid., align 4
  call void @__kmpc_for_static_init_4(ptr nonnull @0, i32 %tmp5, i32 34, ptr nonnull %.omp.is_last, ptr nonnull %.omp.lb, ptr nonnull %.omp.ub, ptr nonnull %.omp.stride, i32 1, i32 1)
  %tmp6 = load i32, ptr %.omp.ub, align 4
  %cmp6 = icmp sgt i32 %tmp6, %sub3
  br i1 %cmp6, label %cond.true, label %cond.false

cond.true:                                        ; preds = %omp.precond.then
  br label %cond.end

cond.false:                                       ; preds = %omp.precond.then
  %tmp7 = load i32, ptr %.omp.ub, align 4
  br label %cond.end

cond.end:                                         ; preds = %cond.false, %cond.true
  %cond = phi i32 [ %sub3, %cond.true ], [ %tmp7, %cond.false ]
  store i32 %cond, ptr %.omp.ub, align 4
  %tmp8 = load i32, ptr %.omp.lb, align 4
  br label %omp.inner.for.cond

omp.inner.for.cond:                               ; preds = %omp.inner.for.inc, %cond.end
  %.omp.iv.0 = phi i32 [ %tmp8, %cond.end ], [ %add11, %omp.inner.for.inc ]
  %tmp9 = load i32, ptr %.omp.ub, align 4
  %cmp8 = icmp sgt i32 %.omp.iv.0, %tmp9
  br i1 %cmp8, label %omp.inner.for.cond.cleanup, label %omp.inner.for.body

omp.inner.for.cond.cleanup:                       ; preds = %omp.inner.for.cond
  br label %omp.inner.for.end

omp.inner.for.body:                               ; preds = %omp.inner.for.cond
  %add10 = add nsw i32 %.omp.iv.0, 2
  %tmp10 = load float, ptr %p, align 4
  %tmp11 = load double, ptr %q.addr, align 8
  call void @bar(i32 %add10, float %tmp10, double %tmp11)
  br label %omp.body.continue

omp.body.continue:                                ; preds = %omp.inner.for.body
  br label %omp.inner.for.inc

omp.inner.for.inc:                                ; preds = %omp.body.continue
  %add11 = add nsw i32 %.omp.iv.0, 1
  br label %omp.inner.for.cond

omp.inner.for.end:                                ; preds = %omp.inner.for.cond.cleanup
  br label %omp.loop.exit

omp.loop.exit:                                    ; preds = %omp.inner.for.end
  %tmp12 = load i32, ptr %.global_tid., align 4
  call void @__kmpc_for_static_fini(ptr nonnull @0, i32 %tmp12)
  br label %omp.precond.end

omp.precond.end:                                  ; preds = %omp.loop.exit, %entry
  ret void
}

declare dso_local void @__kmpc_for_static_init_4(ptr, i32, i32, ptr, ptr, ptr, ptr, i32, i32)

declare dso_local void @bar(i32, float, double)

declare dso_local void @__kmpc_for_static_fini(ptr, i32)

; !callback: operand 2 is the callback callee; the -1 payload entries plus
; the trailing i1 true mark the variadic arguments as forwarded to it.
declare !callback !0 dso_local void @__kmpc_fork_call(ptr, i32, ptr, ...)

!1 = !{i64 2, i64 -1, i64 -1, i1 true}
!0 = !{!1}
