xref: /llvm-project/flang/test/Lower/OpenMP/host-eval.f90 (revision 8fe11a26ae8f12622ddec83a7b80637080843a8b)
! The "thread_limit" clause was added to the "target" construct in OpenMP 5.1.
! RUN: %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=51 %s -o - | FileCheck %s --check-prefixes=BOTH,HOST
! RUN: %flang_fc1 -emit-hlfir -fopenmp -fopenmp-version=51 -fopenmp-is-target-device %s -o - | FileCheck %s --check-prefixes=BOTH,DEVICE

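! This test verifies the lowering of clauses that must be evaluated on the
! host (num_teams, thread_limit, num_threads and the loop bounds of target
! SPMD kernels) into the host_eval argument list of omp.target. The HOST
! prefix checks that host compilation passes these values through host_eval,
! while the DEVICE prefix checks that device compilation does not create any
! host_eval arguments.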
! BOTH-LABEL: func.func @_QPteams
subroutine teams()
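  ! num_teams and thread_limit on a teams construct nested directly inside a
  ! target region are evaluated on the host and forwarded to omp.target via
  ! host_eval when compiling for the host.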
  ! BOTH: omp.target

  ! HOST-SAME: host_eval(%{{.*}} -> %[[NUM_TEAMS:.*]], %{{.*}} -> %[[THREAD_LIMIT:.*]] : i32, i32)

  ! DEVICE-NOT: host_eval({{.*}})
  ! DEVICE-SAME: {
  !$omp target

  ! BOTH: omp.teams

  ! HOST-SAME: num_teams( to %[[NUM_TEAMS]] : i32) thread_limit(%[[THREAD_LIMIT]] : i32)
  ! DEVICE-SAME: num_teams({{.*}}) thread_limit({{.*}})
  !$omp teams num_teams(1) thread_limit(2)
  call foo()
  !$omp end teams

  !$omp end target

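  ! A teams construct outside of a target region keeps its clauses as regular
  ! operands of omp.teams; no host_eval arguments are involved.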
  ! BOTH: omp.teams
  ! BOTH-SAME: num_teams({{.*}}) thread_limit({{.*}}) {
  !$omp teams num_teams(1) thread_limit(2)
  call foo()
  !$omp end teams
end subroutine teams

! BOTH-LABEL: func.func @_QPdistribute_parallel_do
subroutine distribute_parallel_do()
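  ! For a target SPMD kernel (target teams + distribute parallel do), the loop
  ! bounds and the num_threads value are evaluated on the host and passed to
  ! omp.target through host_eval when compiling for the host.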
  ! BOTH: omp.target

  ! HOST-SAME: host_eval(%{{.*}} -> %[[LB:.*]], %{{.*}} -> %[[UB:.*]], %{{.*}} -> %[[STEP:.*]], %{{.*}} -> %[[NUM_THREADS:.*]] : i32, i32, i32, i32)

  ! DEVICE-NOT: host_eval({{.*}})
  ! DEVICE-SAME: {

  ! BOTH: omp.teams
  !$omp target teams

  ! BOTH: omp.parallel

  ! HOST-SAME: num_threads(%[[NUM_THREADS]] : i32)
  ! DEVICE-SAME: num_threads({{.*}})

  ! BOTH: omp.distribute
  ! BOTH-NEXT: omp.wsloop
  ! BOTH-NEXT: omp.loop_nest

  ! HOST-SAME: (%{{.*}}) : i32 = (%[[LB]]) to (%[[UB]]) inclusive step (%[[STEP]])
  !$omp distribute parallel do num_threads(1)
  do i=1,10
    call foo()
  end do
  !$omp end distribute parallel do
  !$omp end target teams

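  ! The call between the teams and distribute constructs below prevents the
  ! region from being identified as SPMD, so no host_eval arguments are
  ! expected for either host or device compilation.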
  ! BOTH: omp.target
  ! BOTH-NOT: host_eval({{.*}})
  ! BOTH-SAME: {
  ! BOTH: omp.teams
  !$omp target teams
  call foo() !< Prevents this from being SPMD.

  ! BOTH: omp.parallel
  ! BOTH-SAME: num_threads({{.*}})
  ! BOTH: omp.distribute
  ! BOTH-NEXT: omp.wsloop
  !$omp distribute parallel do num_threads(1)
  do i=1,10
    call foo()
  end do
  !$omp end distribute parallel do
  !$omp end target teams

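  ! The same composite construct outside of a target region involves no
  ! host_eval arguments.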
  ! BOTH: omp.teams
  !$omp teams

  ! BOTH: omp.parallel
  ! BOTH-SAME: num_threads({{.*}})
  ! BOTH: omp.distribute
  ! BOTH-NEXT: omp.wsloop
  !$omp distribute parallel do num_threads(1)
  do i=1,10
    call foo()
  end do
  !$omp end distribute parallel do
  !$omp end teams
end subroutine distribute_parallel_do

! BOTH-LABEL: func.func @_QPdistribute_parallel_do_simd
subroutine distribute_parallel_do_simd()
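  ! These cases mirror the ones in distribute_parallel_do, with an additional
  ! omp.simd operation expected inside the loop wrapper nest.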
  ! BOTH: omp.target

  ! HOST-SAME: host_eval(%{{.*}} -> %[[LB:.*]], %{{.*}} -> %[[UB:.*]], %{{.*}} -> %[[STEP:.*]], %{{.*}} -> %[[NUM_THREADS:.*]] : i32, i32, i32, i32)

  ! DEVICE-NOT: host_eval({{.*}})
  ! DEVICE-SAME: {

  ! BOTH: omp.teams
  !$omp target teams

  ! BOTH: omp.parallel

  ! HOST-SAME: num_threads(%[[NUM_THREADS]] : i32)
  ! DEVICE-SAME: num_threads({{.*}})

  ! BOTH: omp.distribute
  ! BOTH-NEXT: omp.wsloop
  ! BOTH-NEXT: omp.simd
  ! BOTH-NEXT: omp.loop_nest

  ! HOST-SAME: (%{{.*}}) : i32 = (%[[LB]]) to (%[[UB]]) inclusive step (%[[STEP]])
  !$omp distribute parallel do simd num_threads(1)
  do i=1,10
    call foo()
  end do
  !$omp end distribute parallel do simd
  !$omp end target teams

  ! BOTH: omp.target
  ! BOTH-NOT: host_eval({{.*}})
  ! BOTH-SAME: {
  ! BOTH: omp.teams
  !$omp target teams
  call foo() !< Prevents this from being SPMD.

  ! BOTH: omp.parallel
  ! BOTH-SAME: num_threads({{.*}})
  ! BOTH: omp.distribute
  ! BOTH-NEXT: omp.wsloop
  ! BOTH-NEXT: omp.simd
  !$omp distribute parallel do simd num_threads(1)
  do i=1,10
    call foo()
  end do
  !$omp end distribute parallel do simd
  !$omp end target teams

  ! BOTH: omp.teams
  !$omp teams

  ! BOTH: omp.parallel
  ! BOTH-SAME: num_threads({{.*}})
  ! BOTH: omp.distribute
  ! BOTH-NEXT: omp.wsloop
  ! BOTH-NEXT: omp.simd
  !$omp distribute parallel do simd num_threads(1)
  do i=1,10
    call foo()
  end do
  !$omp end distribute parallel do simd
  !$omp end teams
end subroutine distribute_parallel_do_simd