/* Copyright (C) 2005 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU Lesser General Public License as published by
   the Free Software Foundation; either version 2.1 of the License, or
   (at your option) any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU Lesser General Public License for
   more details.

   You should have received a copy of the GNU Lesser General Public License
   along with libgomp; see the file COPYING.LIB.  If not, write to the
   Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
   MA 02110-1301, USA.  */

/* As a special exception, if you link this library with other files, some
   of which are compiled with GCC, to produce an executable, this library
   does not by itself cause the resulting executable to be covered by the
   GNU General Public License.  This exception does not however invalidate
   any other reasons why the executable file might be covered by the GNU
   General Public License.  */

/* This file contains routines for managing work-share iteration, both
   for loops and sections.  */

#include "libgomp.h"
#include <stdlib.h>

/* This function implements the STATIC scheduling method.  The caller
   should iterate *pstart <= x < *pend.  Return zero if there are more
   iterations to perform; nonzero if not.  Return less than zero if this
   thread received the absolutely last iteration.  */
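/* A minimal sketch of the calling convention (illustrative only; the
   real callers live elsewhere in libgomp, e.g. loop.c, and INCR and
   BODY below are placeholders):

     long s, e;
     while (gomp_iter_static_next (&s, &e) == 0)
       for (long x = s; x != e; x += INCR)
         BODY (x);
*/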

int
gomp_iter_static_next (long *pstart, long *pend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  struct gomp_work_share *ws = thr->ts.work_share;
  unsigned long nthreads = team ? team->nthreads : 1;

  if (thr->ts.static_trip == -1)
    return -1;

  /* Quick test for degenerate teams and orphaned constructs.  */
  if (nthreads == 1)
    {
      *pstart = ws->next;
      *pend = ws->end;
      thr->ts.static_trip = -1;
      return ws->next == ws->end;
    }

  /* We interpret chunk_size zero as "unspecified", which means that we
     should break up the iterations such that each thread makes only one
     trip through the outer loop.  */
  if (ws->chunk_size == 0)
    {
      unsigned long n, q, i;
      unsigned long s0, e0;
      long s, e;

      if (thr->ts.static_trip > 0)
        return 1;

      /* Compute the total number of iterations.  */
      s = ws->incr + (ws->incr > 0 ? -1 : 1);
      n = (ws->end - ws->next + s) / ws->incr;
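      /* Worked example (illustrative values): a loop covering the
         iterations 0, 3, 6, 9 arrives here with next = 0, end = 10 and
         incr = 3, so s = 2 and n = (10 - 0 + 2) / 3 = 4.  The bias S
         turns the division into a ceiling division, so a partial final
         step still counts as an iteration.  */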
      i = thr->ts.team_id;

      /* Compute the "zero-based" start and end points.  That is, as
         if the loop began at zero and incremented by one.  */
      q = n / nthreads;
      q += (q * nthreads != n);
      s0 = q * i;
      e0 = s0 + q;
      if (e0 > n)
        e0 = n;
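
      /* For example (illustrative values): n = 10 iterations over
         nthreads = 4 gives q = 3, so threads 0-3 receive [0,3), [3,6),
         [6,9) and [9,10), the last clamped from [9,12) down to N.  */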

      /* Notice when no iterations are allocated for this thread.  */
      if (s0 >= e0)
        {
          thr->ts.static_trip = 1;
          return 1;
        }

      /* Transform these to the actual start and end numbers.  */
      s = (long)s0 * ws->incr + ws->next;
      e = (long)e0 * ws->incr + ws->next;

      *pstart = s;
      *pend = e;
      thr->ts.static_trip = (e0 == n ? -1 : 1);
      return 0;
    }
  else
    {
      unsigned long n, s0, e0, i, c;
      long s, e;

      /* Otherwise, each thread gets exactly chunk_size iterations
         (if available) each time through the loop.  */

      s = ws->incr + (ws->incr > 0 ? -1 : 1);
      n = (ws->end - ws->next + s) / ws->incr;
      i = thr->ts.team_id;
      c = ws->chunk_size;
      /* Initial guess is a C sized chunk positioned static_trip * nthreads
         chunks in, offset by our thread number.  */
      s0 = (thr->ts.static_trip * nthreads + i) * c;
      e0 = s0 + c;
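
      /* For example (illustrative values): with chunk_size C = 2 and
         nthreads = 3, thread 1 claims the zero-based range [2,4) on
         trip 0, [8,10) on trip 1, and so on; each trip advances the
         thread by nthreads * C = 6 iterations.  */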

      /* Detect when this thread's chunk begins past the end of the
         iteration space.  */
      if (s0 >= n)
        return 1;
      if (e0 > n)
        e0 = n;

      /* Transform these to the actual start and end numbers.  */
      s = (long)s0 * ws->incr + ws->next;
      e = (long)e0 * ws->incr + ws->next;

      *pstart = s;
      *pend = e;

      if (e0 == n)
        thr->ts.static_trip = -1;
      else
        thr->ts.static_trip++;
      return 0;
    }
}


/* This function implements the DYNAMIC scheduling method.  Arguments are
   as for gomp_iter_static_next.  This function must be called with ws->lock
   held.  */

bool
gomp_iter_dynamic_next_locked (long *pstart, long *pend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_work_share *ws = thr->ts.work_share;
  long start, end, chunk, left;

  start = ws->next;
  if (start == ws->end)
    return false;

  chunk = ws->chunk_size * ws->incr;
  left = ws->end - start;
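
  /* Clamp the chunk so that it never runs past ws->end.  For a
     negative increment both CHUNK and LEFT are negative, so the sense
     of the comparison is reversed relative to the positive case.  */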
  if (ws->incr < 0)
    {
      if (chunk < left)
        chunk = left;
    }
  else
    {
      if (chunk > left)
        chunk = left;
    }
  end = start + chunk;

  ws->next = end;
  *pstart = start;
  *pend = end;
  return true;
}


#ifdef HAVE_SYNC_BUILTINS
/* Similar, but doesn't require the lock to be held, and uses
   compare-and-swap instead.  Note that the only memory value that
   changes is ws->next.  */

bool
gomp_iter_dynamic_next (long *pstart, long *pend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_work_share *ws = thr->ts.work_share;
  long start, end, nend, chunk, incr;

  start = ws->next;
  end = ws->end;
  incr = ws->incr;
  chunk = ws->chunk_size * incr;

  while (1)
    {
      long left = end - start;
      long tmp;

      if (start == end)
        return false;

      if (incr < 0)
        {
          if (chunk < left)
            chunk = left;
        }
      else
        {
          if (chunk > left)
            chunk = left;
        }
      nend = start + chunk;

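      /* Try to claim [START, NEND) atomically.  The GCC builtin
         returns the value ws->next held before the operation: if it
         still equals START we won the race; otherwise another thread
         advanced ws->next and we retry from its updated value.  */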
      tmp = __sync_val_compare_and_swap (&ws->next, start, nend);
      if (__builtin_expect (tmp == start, 1))
        break;

      start = tmp;
    }

  *pstart = start;
  *pend = nend;
  return true;
}
#endif /* HAVE_SYNC_BUILTINS */


/* This function implements the GUIDED scheduling method.  Arguments are
   as for gomp_iter_static_next.  This function must be called with the
   work share lock held.  */

bool
gomp_iter_guided_next_locked (long *pstart, long *pend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_work_share *ws = thr->ts.work_share;
  struct gomp_team *team = thr->ts.team;
  unsigned long nthreads = team ? team->nthreads : 1;
  unsigned long n, q;
  long start, end;

  if (ws->next == ws->end)
    return false;

  n = (ws->end - ws->next) / ws->incr;
  q = (n + nthreads - 1) / nthreads;

  if (q < ws->chunk_size)
    q = ws->chunk_size;
  if (q > n)
    q = n;
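
  /* For example (illustrative values): with n = 100 iterations left,
     nthreads = 4 and chunk_size = 1, successive calls hand out chunks
     of 25, 19, 14, 11, ... iterations, shrinking geometrically toward
     chunk_size as the remaining work decreases.  */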

  start = ws->next;
  end = start + q * ws->incr;

  ws->next = end;
  *pstart = start;
  *pend = end;
  return true;
}

#ifdef HAVE_SYNC_BUILTINS
/* Similar, but doesn't require the lock to be held, and uses
   compare-and-swap instead.  Note that the only memory value that
   changes is ws->next.  */

bool
gomp_iter_guided_next (long *pstart, long *pend)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_work_share *ws = thr->ts.work_share;
  struct gomp_team *team = thr->ts.team;
  unsigned long nthreads = team ? team->nthreads : 1;
  long start, end, nend, incr;
  unsigned long chunk_size;

  start = ws->next;
  end = ws->end;
  incr = ws->incr;
  chunk_size = ws->chunk_size;

  while (1)
    {
      unsigned long n, q;
      long tmp;

      if (start == end)
        return false;

      n = (end - start) / incr;
      q = (n + nthreads - 1) / nthreads;

      if (q < chunk_size)
        q = chunk_size;
      if (q > n)
        q = n;

      nend = start + q * incr;

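      /* As in gomp_iter_dynamic_next, claim [START, NEND) with a
         compare-and-swap and retry from the updated ws->next if
         another thread raced ahead of us.  */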
      tmp = __sync_val_compare_and_swap (&ws->next, start, nend);
      if (__builtin_expect (tmp == start, 1))
        break;

      start = tmp;
    }

  *pstart = start;
  *pend = nend;
  return true;
}
#endif /* HAVE_SYNC_BUILTINS */