/* Copyright (C) 2005-2018 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the (bare) PARALLEL construct.  */

#include "libgomp.h"
#include <limits.h>
31*38fd1498Szrj 
32*38fd1498Szrj /* Determine the number of threads to be launched for a PARALLEL construct.
33*38fd1498Szrj    This algorithm is explicitly described in OpenMP 3.0 section 2.4.1.
34*38fd1498Szrj    SPECIFIED is a combination of the NUM_THREADS clause and the IF clause.
35*38fd1498Szrj    If the IF clause is false, SPECIFIED is forced to 1.  When NUM_THREADS
36*38fd1498Szrj    is not present, SPECIFIED is 0.  */
37*38fd1498Szrj 
38*38fd1498Szrj unsigned
gomp_resolve_num_threads(unsigned specified,unsigned count)39*38fd1498Szrj gomp_resolve_num_threads (unsigned specified, unsigned count)
40*38fd1498Szrj {
41*38fd1498Szrj   struct gomp_thread *thr = gomp_thread ();
42*38fd1498Szrj   struct gomp_task_icv *icv;
43*38fd1498Szrj   unsigned threads_requested, max_num_threads, num_threads;
44*38fd1498Szrj   unsigned long busy;
45*38fd1498Szrj   struct gomp_thread_pool *pool;
46*38fd1498Szrj 
47*38fd1498Szrj   icv = gomp_icv (false);
48*38fd1498Szrj 
49*38fd1498Szrj   if (specified == 1)
50*38fd1498Szrj     return 1;
51*38fd1498Szrj   else if (thr->ts.active_level >= 1 && !icv->nest_var)
52*38fd1498Szrj     return 1;
53*38fd1498Szrj   else if (thr->ts.active_level >= gomp_max_active_levels_var)
54*38fd1498Szrj     return 1;
55*38fd1498Szrj 
56*38fd1498Szrj   /* If NUM_THREADS not specified, use nthreads_var.  */
57*38fd1498Szrj   if (specified == 0)
58*38fd1498Szrj     threads_requested = icv->nthreads_var;
59*38fd1498Szrj   else
60*38fd1498Szrj     threads_requested = specified;
61*38fd1498Szrj 
62*38fd1498Szrj   max_num_threads = threads_requested;
63*38fd1498Szrj 
64*38fd1498Szrj   /* If dynamic threads are enabled, bound the number of threads
65*38fd1498Szrj      that we launch.  */
66*38fd1498Szrj   if (icv->dyn_var)
67*38fd1498Szrj     {
68*38fd1498Szrj       unsigned dyn = gomp_dynamic_max_threads ();
69*38fd1498Szrj       if (dyn < max_num_threads)
70*38fd1498Szrj 	max_num_threads = dyn;
71*38fd1498Szrj 
72*38fd1498Szrj       /* Optimization for parallel sections.  */
73*38fd1498Szrj       if (count && count < max_num_threads)
74*38fd1498Szrj 	max_num_threads = count;
75*38fd1498Szrj     }
76*38fd1498Szrj 
77*38fd1498Szrj   /* UINT_MAX stands for infinity.  */
78*38fd1498Szrj   if (__builtin_expect (icv->thread_limit_var == UINT_MAX, 1)
79*38fd1498Szrj       || max_num_threads == 1)
80*38fd1498Szrj     return max_num_threads;
81*38fd1498Szrj 
82*38fd1498Szrj   /* The threads_busy counter lives in thread_pool, if there
83*38fd1498Szrj      isn't a thread_pool yet, there must be just one thread
84*38fd1498Szrj      in the contention group.  If thr->team is NULL, this isn't
85*38fd1498Szrj      nested parallel, so there is just one thread in the
86*38fd1498Szrj      contention group as well, no need to handle it atomically.  */
87*38fd1498Szrj   pool = thr->thread_pool;
88*38fd1498Szrj   if (thr->ts.team == NULL || pool == NULL)
89*38fd1498Szrj     {
90*38fd1498Szrj       num_threads = max_num_threads;
91*38fd1498Szrj       if (num_threads > icv->thread_limit_var)
92*38fd1498Szrj 	num_threads = icv->thread_limit_var;
93*38fd1498Szrj       if (pool)
94*38fd1498Szrj 	pool->threads_busy = num_threads;
95*38fd1498Szrj       return num_threads;
96*38fd1498Szrj     }
97*38fd1498Szrj 
98*38fd1498Szrj #ifdef HAVE_SYNC_BUILTINS
99*38fd1498Szrj   do
100*38fd1498Szrj     {
101*38fd1498Szrj       busy = pool->threads_busy;
102*38fd1498Szrj       num_threads = max_num_threads;
103*38fd1498Szrj       if (icv->thread_limit_var - busy + 1 < num_threads)
104*38fd1498Szrj 	num_threads = icv->thread_limit_var - busy + 1;
105*38fd1498Szrj     }
106*38fd1498Szrj   while (__sync_val_compare_and_swap (&pool->threads_busy,
107*38fd1498Szrj 				      busy, busy + num_threads - 1)
108*38fd1498Szrj 	 != busy);
109*38fd1498Szrj #else
110*38fd1498Szrj   gomp_mutex_lock (&gomp_managed_threads_lock);
111*38fd1498Szrj   num_threads = max_num_threads;
112*38fd1498Szrj   busy = pool->threads_busy;
113*38fd1498Szrj   if (icv->thread_limit_var - busy + 1 < num_threads)
114*38fd1498Szrj     num_threads = icv->thread_limit_var - busy + 1;
115*38fd1498Szrj   pool->threads_busy += num_threads - 1;
116*38fd1498Szrj   gomp_mutex_unlock (&gomp_managed_threads_lock);
117*38fd1498Szrj #endif
118*38fd1498Szrj 
119*38fd1498Szrj   return num_threads;
120*38fd1498Szrj }
121*38fd1498Szrj 
/* Old (pre-GOMP_parallel) entry point: start a team running FN (DATA).
   Unlike GOMP_parallel below, the caller invokes FN itself and later
   calls GOMP_parallel_end explicitly.  */
void
GOMP_parallel_start (void (*fn) (void *), void *data, unsigned num_threads)
{
  unsigned resolved = gomp_resolve_num_threads (num_threads, 0);

  gomp_team_start (fn, data, resolved, 0, gomp_new_team (resolved));
}
128*38fd1498Szrj 
129*38fd1498Szrj void
GOMP_parallel_end(void)130*38fd1498Szrj GOMP_parallel_end (void)
131*38fd1498Szrj {
132*38fd1498Szrj   struct gomp_task_icv *icv = gomp_icv (false);
133*38fd1498Szrj   if (__builtin_expect (icv->thread_limit_var != UINT_MAX, 0))
134*38fd1498Szrj     {
135*38fd1498Szrj       struct gomp_thread *thr = gomp_thread ();
136*38fd1498Szrj       struct gomp_team *team = thr->ts.team;
137*38fd1498Szrj       unsigned int nthreads = team ? team->nthreads : 1;
138*38fd1498Szrj       gomp_team_end ();
139*38fd1498Szrj       if (nthreads > 1)
140*38fd1498Szrj 	{
141*38fd1498Szrj 	  /* If not nested, there is just one thread in the
142*38fd1498Szrj 	     contention group left, no need for atomicity.  */
143*38fd1498Szrj 	  if (thr->ts.team == NULL)
144*38fd1498Szrj 	    thr->thread_pool->threads_busy = 1;
145*38fd1498Szrj 	  else
146*38fd1498Szrj 	    {
147*38fd1498Szrj #ifdef HAVE_SYNC_BUILTINS
148*38fd1498Szrj 	      __sync_fetch_and_add (&thr->thread_pool->threads_busy,
149*38fd1498Szrj 				    1UL - nthreads);
150*38fd1498Szrj #else
151*38fd1498Szrj 	      gomp_mutex_lock (&gomp_managed_threads_lock);
152*38fd1498Szrj 	      thr->thread_pool->threads_busy -= nthreads - 1;
153*38fd1498Szrj 	      gomp_mutex_unlock (&gomp_managed_threads_lock);
154*38fd1498Szrj #endif
155*38fd1498Szrj 	    }
156*38fd1498Szrj 	}
157*38fd1498Szrj     }
158*38fd1498Szrj   else
159*38fd1498Szrj     gomp_team_end ();
160*38fd1498Szrj }
ialias(GOMP_parallel_end)161*38fd1498Szrj ialias (GOMP_parallel_end)
162*38fd1498Szrj 
163*38fd1498Szrj void
164*38fd1498Szrj GOMP_parallel (void (*fn) (void *), void *data, unsigned num_threads, unsigned int flags)
165*38fd1498Szrj {
166*38fd1498Szrj   num_threads = gomp_resolve_num_threads (num_threads, 0);
167*38fd1498Szrj   gomp_team_start (fn, data, num_threads, flags, gomp_new_team (num_threads));
168*38fd1498Szrj   fn (data);
169*38fd1498Szrj   ialias_call (GOMP_parallel_end) ();
170*38fd1498Szrj }
171*38fd1498Szrj 
172*38fd1498Szrj bool
GOMP_cancellation_point(int which)173*38fd1498Szrj GOMP_cancellation_point (int which)
174*38fd1498Szrj {
175*38fd1498Szrj   if (!gomp_cancel_var)
176*38fd1498Szrj     return false;
177*38fd1498Szrj 
178*38fd1498Szrj   struct gomp_thread *thr = gomp_thread ();
179*38fd1498Szrj   struct gomp_team *team = thr->ts.team;
180*38fd1498Szrj   if (which & (GOMP_CANCEL_LOOP | GOMP_CANCEL_SECTIONS))
181*38fd1498Szrj     {
182*38fd1498Szrj       if (team == NULL)
183*38fd1498Szrj 	return false;
184*38fd1498Szrj       return team->work_share_cancelled != 0;
185*38fd1498Szrj     }
186*38fd1498Szrj   else if (which & GOMP_CANCEL_TASKGROUP)
187*38fd1498Szrj     {
188*38fd1498Szrj       if (thr->task->taskgroup && thr->task->taskgroup->cancelled)
189*38fd1498Szrj 	return true;
190*38fd1498Szrj       /* FALLTHRU into the GOMP_CANCEL_PARALLEL case,
191*38fd1498Szrj 	 as #pragma omp cancel parallel also cancels all explicit
192*38fd1498Szrj 	 tasks.  */
193*38fd1498Szrj     }
194*38fd1498Szrj   if (team)
195*38fd1498Szrj     return gomp_team_barrier_cancelled (&team->barrier);
196*38fd1498Szrj   return false;
197*38fd1498Szrj }
ialias(GOMP_cancellation_point)198*38fd1498Szrj ialias (GOMP_cancellation_point)
199*38fd1498Szrj 
200*38fd1498Szrj bool
201*38fd1498Szrj GOMP_cancel (int which, bool do_cancel)
202*38fd1498Szrj {
203*38fd1498Szrj   if (!gomp_cancel_var)
204*38fd1498Szrj     return false;
205*38fd1498Szrj 
206*38fd1498Szrj   if (!do_cancel)
207*38fd1498Szrj     return ialias_call (GOMP_cancellation_point) (which);
208*38fd1498Szrj 
209*38fd1498Szrj   struct gomp_thread *thr = gomp_thread ();
210*38fd1498Szrj   struct gomp_team *team = thr->ts.team;
211*38fd1498Szrj   if (which & (GOMP_CANCEL_LOOP | GOMP_CANCEL_SECTIONS))
212*38fd1498Szrj     {
213*38fd1498Szrj       /* In orphaned worksharing region, all we want to cancel
214*38fd1498Szrj 	 is current thread.  */
215*38fd1498Szrj       if (team != NULL)
216*38fd1498Szrj 	team->work_share_cancelled = 1;
217*38fd1498Szrj       return true;
218*38fd1498Szrj     }
219*38fd1498Szrj   else if (which & GOMP_CANCEL_TASKGROUP)
220*38fd1498Szrj     {
221*38fd1498Szrj       if (thr->task->taskgroup && !thr->task->taskgroup->cancelled)
222*38fd1498Szrj 	{
223*38fd1498Szrj 	  gomp_mutex_lock (&team->task_lock);
224*38fd1498Szrj 	  thr->task->taskgroup->cancelled = true;
225*38fd1498Szrj 	  gomp_mutex_unlock (&team->task_lock);
226*38fd1498Szrj 	}
227*38fd1498Szrj       return true;
228*38fd1498Szrj     }
229*38fd1498Szrj   team->team_cancelled = 1;
230*38fd1498Szrj   gomp_team_barrier_cancel (team);
231*38fd1498Szrj   return true;
232*38fd1498Szrj }
233*38fd1498Szrj 
234*38fd1498Szrj /* The public OpenMP API for thread and team related inquiries.  */
235*38fd1498Szrj 
236*38fd1498Szrj int
omp_get_num_threads(void)237*38fd1498Szrj omp_get_num_threads (void)
238*38fd1498Szrj {
239*38fd1498Szrj   struct gomp_team *team = gomp_thread ()->ts.team;
240*38fd1498Szrj   return team ? team->nthreads : 1;
241*38fd1498Szrj }
242*38fd1498Szrj 
243*38fd1498Szrj int
omp_get_thread_num(void)244*38fd1498Szrj omp_get_thread_num (void)
245*38fd1498Szrj {
246*38fd1498Szrj   return gomp_thread ()->ts.team_id;
247*38fd1498Szrj }
248*38fd1498Szrj 
249*38fd1498Szrj /* This wasn't right for OpenMP 2.5.  Active region used to be non-zero
250*38fd1498Szrj    when the IF clause doesn't evaluate to false, starting with OpenMP 3.0
251*38fd1498Szrj    it is non-zero with more than one thread in the team.  */
252*38fd1498Szrj 
253*38fd1498Szrj int
omp_in_parallel(void)254*38fd1498Szrj omp_in_parallel (void)
255*38fd1498Szrj {
256*38fd1498Szrj   return gomp_thread ()->ts.active_level > 0;
257*38fd1498Szrj }
258*38fd1498Szrj 
259*38fd1498Szrj int
omp_get_level(void)260*38fd1498Szrj omp_get_level (void)
261*38fd1498Szrj {
262*38fd1498Szrj   return gomp_thread ()->ts.level;
263*38fd1498Szrj }
264*38fd1498Szrj 
265*38fd1498Szrj int
omp_get_ancestor_thread_num(int level)266*38fd1498Szrj omp_get_ancestor_thread_num (int level)
267*38fd1498Szrj {
268*38fd1498Szrj   struct gomp_team_state *ts = &gomp_thread ()->ts;
269*38fd1498Szrj   if (level < 0 || level > ts->level)
270*38fd1498Szrj     return -1;
271*38fd1498Szrj   for (level = ts->level - level; level > 0; --level)
272*38fd1498Szrj     ts = &ts->team->prev_ts;
273*38fd1498Szrj   return ts->team_id;
274*38fd1498Szrj }
275*38fd1498Szrj 
276*38fd1498Szrj int
omp_get_team_size(int level)277*38fd1498Szrj omp_get_team_size (int level)
278*38fd1498Szrj {
279*38fd1498Szrj   struct gomp_team_state *ts = &gomp_thread ()->ts;
280*38fd1498Szrj   if (level < 0 || level > ts->level)
281*38fd1498Szrj     return -1;
282*38fd1498Szrj   for (level = ts->level - level; level > 0; --level)
283*38fd1498Szrj     ts = &ts->team->prev_ts;
284*38fd1498Szrj   if (ts->team == NULL)
285*38fd1498Szrj     return 1;
286*38fd1498Szrj   else
287*38fd1498Szrj     return ts->team->nthreads;
288*38fd1498Szrj }
289*38fd1498Szrj 
290*38fd1498Szrj int
omp_get_active_level(void)291*38fd1498Szrj omp_get_active_level (void)
292*38fd1498Szrj {
293*38fd1498Szrj   return gomp_thread ()->ts.active_level;
294*38fd1498Szrj }
295*38fd1498Szrj 
296*38fd1498Szrj ialias (omp_get_num_threads)
297*38fd1498Szrj ialias (omp_get_thread_num)
298*38fd1498Szrj ialias (omp_in_parallel)
299*38fd1498Szrj ialias (omp_get_level)
300*38fd1498Szrj ialias (omp_get_ancestor_thread_num)
301*38fd1498Szrj ialias (omp_get_team_size)
302*38fd1498Szrj ialias (omp_get_active_level)
303