/* Copyright (C) 2005-2022 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the LOOP (FOR/DO) construct.  */

#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include "libgomp.h"


ialias (GOMP_loop_runtime_next)
ialias_redirect (GOMP_taskgroup_reduction_register)

/* Initialize the given work share construct from the given arguments.  */

static inline void
gomp_loop_init (struct gomp_work_share *ws, long start, long end, long incr,
		enum gomp_schedule_type sched, long chunk_size)
{
  ws->sched = sched;
  ws->chunk_size = chunk_size;
  /* Canonicalize loops that have zero iterations to ->next == ->end.  */
  ws->end = ((incr > 0 && start > end) || (incr < 0 && start < end))
	    ? start : end;
  ws->incr = incr;
  ws->next = start;
  if (sched == GFS_DYNAMIC)
    {
      ws->chunk_size *= incr;

#ifdef HAVE_SYNC_BUILTINS
      {
	/* For dynamic scheduling prepare things to make each iteration
	   faster.  */
	struct gomp_thread *thr = gomp_thread ();
	struct gomp_team *team = thr->ts.team;
	long nthreads = team ? team->nthreads : 1;

	if (__builtin_expect (incr > 0, 1))
	  {
	    /* Cheap overflow protection.  */
	    if (__builtin_expect ((nthreads | ws->chunk_size)
				  >= 1UL << (sizeof (long)
					     * __CHAR_BIT__ / 2 - 1), 0))
	      ws->mode = 0;
	    else
	      ws->mode = ws->end < (LONG_MAX
				    - (nthreads + 1) * ws->chunk_size);
	  }
	/* Cheap overflow protection.  */
	else if (__builtin_expect ((nthreads | -ws->chunk_size)
				   >= 1UL << (sizeof (long)
					      * __CHAR_BIT__ / 2 - 1), 0))
	  ws->mode = 0;
	else
	  ws->mode = ws->end > (nthreads + 1) * -ws->chunk_size - LONG_MAX;
      }
#endif
    }
}
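
/* As a rough illustration of the initialization above: for START = 10,
   END = 0, INCR = 1 the loop has no iterations, so ws->end is
   canonicalized to START (== ws->next) and the first gomp_iter_*_next
   call immediately reports that all work has been handed out.  For
   GFS_DYNAMIC with, say, CHUNK_SIZE = 4 and INCR = 1, ws->chunk_size
   becomes 4, and when the team size and chunk are small enough that no
   overflow is possible, ws->mode is set so that gomp_iter_dynamic_next
   can claim chunks with a plain atomic add instead of a compare-and-swap
   loop.  (The numbers here are only an example, not taken from the
   sources.)  */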

/* The *_start routines are called when first encountering a loop construct
   that is not bound directly to a parallel construct.  The first thread
   that arrives will create the work-share construct; subsequent threads
   will see the construct exists and allocate work from it.

   START, END, INCR are the bounds of the loop; due to the restrictions of
   OpenMP, these values must be the same in every thread.  This is not
   verified (nor is it entirely verifiable, since START is not necessarily
   retained intact in the work-share data structure).  CHUNK_SIZE is the
   scheduling parameter; again this must be identical in all threads.

   Returns true if there's any work for this thread to perform.  If so,
   *ISTART and *IEND are filled with the bounds of the iteration block
   allocated to this thread.  Returns false if all work was assigned to
   other threads prior to this thread's arrival.  */
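
/* Roughly, the code GCC emits for an orphaned
     #pragma omp for schedule(dynamic, 4)
   follows this start/next/end pattern (a sketch of the calling
   convention only; body (i) stands in for the user's loop body):

     long istart, iend;
     if (GOMP_loop_dynamic_start (0, n, 1, 4, &istart, &iend))
       do
	 for (long i = istart; i < iend; i++)
	   body (i);
       while (GOMP_loop_dynamic_next (&istart, &iend));
     GOMP_loop_end ();

   A nowait loop ends with GOMP_loop_end_nowait () instead.  */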

static bool
gomp_loop_static_start (long start, long end, long incr, long chunk_size,
			long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (0))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
		      GFS_STATIC, chunk_size);
      gomp_work_share_init_done ();
    }

  return !gomp_iter_static_next (istart, iend);
}

/* The current dynamic implementation is always monotonic.  The
   entrypoints without nonmonotonic in them have to be always monotonic,
   but the nonmonotonic ones could be changed to use work-stealing for
   improved scalability.  */

static bool
gomp_loop_dynamic_start (long start, long end, long incr, long chunk_size,
			 long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (0))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
		      GFS_DYNAMIC, chunk_size);
      gomp_work_share_init_done ();
    }

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_dynamic_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

/* Similar to the dynamic case, though the question is how the chunk sizes
   can be decreased without central locking or atomics.  */

static bool
gomp_loop_guided_start (long start, long end, long incr, long chunk_size,
			long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (0))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
		      GFS_GUIDED, chunk_size);
      gomp_work_share_init_done ();
    }

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_guided_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_runtime_start (long start, long end, long incr,
			 long *istart, long *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var & ~GFS_MONOTONIC)
    {
    case GFS_STATIC:
      return gomp_loop_static_start (start, end, incr,
				     icv->run_sched_chunk_size,
				     istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_dynamic_start (start, end, incr,
				      icv->run_sched_chunk_size,
				      istart, iend);
    case GFS_GUIDED:
      return gomp_loop_guided_start (start, end, incr,
				     icv->run_sched_chunk_size,
				     istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static), later on we could play with feedback
	 driven choice.  */
      return gomp_loop_static_start (start, end, incr, 0, istart, iend);
    default:
      abort ();
    }
}

static long
gomp_adjust_sched (long sched, long *chunk_size)
{
  sched &= ~GFS_MONOTONIC;
  switch (sched)
    {
    case GFS_STATIC:
    case GFS_DYNAMIC:
    case GFS_GUIDED:
      return sched;
    /* GFS_RUNTIME is used for runtime schedule without monotonic
       or nonmonotonic modifiers on the clause.
       GFS_RUNTIME|GFS_MONOTONIC for runtime schedule with monotonic
       modifier.  */
    case GFS_RUNTIME:
    /* GFS_AUTO is used for runtime schedule with nonmonotonic
       modifier.  */
    case GFS_AUTO:
      {
	struct gomp_task_icv *icv = gomp_icv (false);
	sched = icv->run_sched_var & ~GFS_MONOTONIC;
	switch (sched)
	  {
	  case GFS_STATIC:
	  case GFS_DYNAMIC:
	  case GFS_GUIDED:
	    *chunk_size = icv->run_sched_chunk_size;
	    break;
	  case GFS_AUTO:
	    sched = GFS_STATIC;
	    *chunk_size = 0;
	    break;
	  default:
	    abort ();
	  }
	return sched;
      }
    default:
      abort ();
    }
}

bool
GOMP_loop_start (long start, long end, long incr, long sched,
		 long chunk_size, long *istart, long *iend,
		 uintptr_t *reductions, void **mem)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (reductions)
    gomp_workshare_taskgroup_start ();
  if (gomp_work_share_start (0))
    {
      sched = gomp_adjust_sched (sched, &chunk_size);
      gomp_loop_init (thr->ts.work_share, start, end, incr,
		      sched, chunk_size);
      if (reductions)
	{
	  GOMP_taskgroup_reduction_register (reductions);
	  thr->task->taskgroup->workshare = true;
	  thr->ts.work_share->task_reductions = reductions;
	}
      if (mem)
	{
	  uintptr_t size = (uintptr_t) *mem;
#define INLINE_ORDERED_TEAM_IDS_OFF \
  ((offsetof (struct gomp_work_share, inline_ordered_team_ids)		\
    + __alignof__ (long long) - 1) & ~(__alignof__ (long long) - 1))
	  if (sizeof (struct gomp_work_share)
	      <= INLINE_ORDERED_TEAM_IDS_OFF
	      || __alignof__ (struct gomp_work_share) < __alignof__ (long long)
	      || size > (sizeof (struct gomp_work_share)
			- INLINE_ORDERED_TEAM_IDS_OFF))
	    *mem
	      = (void *) (thr->ts.work_share->ordered_team_ids
			  = gomp_malloc_cleared (size));
	  else
	    *mem = memset (((char *) thr->ts.work_share)
			   + INLINE_ORDERED_TEAM_IDS_OFF, '\0', size);
	}
      gomp_work_share_init_done ();
    }
  else
    {
      if (reductions)
	{
	  uintptr_t *first_reductions = thr->ts.work_share->task_reductions;
	  gomp_workshare_task_reduction_register (reductions,
						  first_reductions);
	}
      if (mem)
	{
	  if ((offsetof (struct gomp_work_share, inline_ordered_team_ids)
	       & (__alignof__ (long long) - 1)) == 0)
	    *mem = (void *) thr->ts.work_share->ordered_team_ids;
	  else
	    {
	      uintptr_t p = (uintptr_t) thr->ts.work_share->ordered_team_ids;
	      p += __alignof__ (long long) - 1;
	      p &= ~(__alignof__ (long long) - 1);
	      *mem = (void *) p;
	    }
	}
    }

  if (!istart)
    return true;
  return ialias_call (GOMP_loop_runtime_next) (istart, iend);
}

/* The *_ordered_*_start routines are similar.  The only difference is that
   this work-share construct is initialized to expect an ORDERED section.  */
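
/* As a sketch, a loop such as
     #pragma omp for ordered schedule(dynamic, chunk)
   uses the same start/next/end pattern with the *_ordered_* entry
   points, and each ordered region inside the body is bracketed by
   GOMP_ordered_start () / GOMP_ordered_end () (defined in ordered.c).
   Here chunk and ordered_body are placeholders:

     if (GOMP_loop_ordered_dynamic_start (0, n, 1, chunk, &istart, &iend))
       do
	 for (long i = istart; i < iend; i++)
	   {
	     GOMP_ordered_start ();
	     ordered_body (i);
	     GOMP_ordered_end ();
	   }
       while (GOMP_loop_ordered_dynamic_next (&istart, &iend));
     GOMP_loop_end ();  */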

static bool
gomp_loop_ordered_static_start (long start, long end, long incr,
				long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (1))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
		      GFS_STATIC, chunk_size);
      gomp_ordered_static_init ();
      gomp_work_share_init_done ();
    }

  return !gomp_iter_static_next (istart, iend);
}

static bool
gomp_loop_ordered_dynamic_start (long start, long end, long incr,
				 long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (1))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
		      GFS_DYNAMIC, chunk_size);
      gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    gomp_mutex_lock (&thr->ts.work_share->lock);

  ret = gomp_iter_dynamic_next_locked (istart, iend);
  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

static bool
gomp_loop_ordered_guided_start (long start, long end, long incr,
				long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (1))
    {
      gomp_loop_init (thr->ts.work_share, start, end, incr,
		      GFS_GUIDED, chunk_size);
      gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    gomp_mutex_lock (&thr->ts.work_share->lock);

  ret = gomp_iter_guided_next_locked (istart, iend);
  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

bool
GOMP_loop_ordered_runtime_start (long start, long end, long incr,
				 long *istart, long *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var & ~GFS_MONOTONIC)
    {
    case GFS_STATIC:
      return gomp_loop_ordered_static_start (start, end, incr,
					     icv->run_sched_chunk_size,
					     istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ordered_dynamic_start (start, end, incr,
					      icv->run_sched_chunk_size,
					      istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ordered_guided_start (start, end, incr,
					     icv->run_sched_chunk_size,
					     istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static), later on we could play with feedback
	 driven choice.  */
      return gomp_loop_ordered_static_start (start, end, incr,
					     0, istart, iend);
    default:
      abort ();
    }
}

bool
GOMP_loop_ordered_start (long start, long end, long incr, long sched,
			 long chunk_size, long *istart, long *iend,
			 uintptr_t *reductions, void **mem)
{
  struct gomp_thread *thr = gomp_thread ();
  size_t ordered = 1;
  bool ret;

  thr->ts.static_trip = 0;
  if (reductions)
    gomp_workshare_taskgroup_start ();
  if (mem)
    ordered += (uintptr_t) *mem;
  if (gomp_work_share_start (ordered))
    {
      sched = gomp_adjust_sched (sched, &chunk_size);
      gomp_loop_init (thr->ts.work_share, start, end, incr,
		      sched, chunk_size);
      if (reductions)
	{
	  GOMP_taskgroup_reduction_register (reductions);
	  thr->task->taskgroup->workshare = true;
	  thr->ts.work_share->task_reductions = reductions;
	}
      if (sched == GFS_STATIC)
	gomp_ordered_static_init ();
      else
	gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    {
      if (reductions)
	{
	  uintptr_t *first_reductions = thr->ts.work_share->task_reductions;
	  gomp_workshare_task_reduction_register (reductions,
						  first_reductions);
	}
      sched = thr->ts.work_share->sched;
      if (sched != GFS_STATIC)
	gomp_mutex_lock (&thr->ts.work_share->lock);
    }

  if (mem)
    {
      uintptr_t p
	= (uintptr_t) (thr->ts.work_share->ordered_team_ids
		       + (thr->ts.team ? thr->ts.team->nthreads : 1));
      p += __alignof__ (long long) - 1;
      p &= ~(__alignof__ (long long) - 1);
      *mem = (void *) p;
    }

  switch (sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      return !gomp_iter_static_next (istart, iend);
    case GFS_DYNAMIC:
      ret = gomp_iter_dynamic_next_locked (istart, iend);
      break;
    case GFS_GUIDED:
      ret = gomp_iter_guided_next_locked (istart, iend);
      break;
    default:
      abort ();
    }

  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);
  return ret;
}

/* The *_doacross_*_start routines are similar.  The only difference is that
   this work-share construct is initialized to expect an ORDERED(N) - DOACROSS
   section; the worksharing loop always iterates from 0 to COUNTS[0] - 1, and
   the other COUNTS array elements tell the library the number of iterations
   of the ordered inner loops.  */
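
/* A rough sketch of the doacross case: for a nest such as
     #pragma omp for ordered(2)
     for (i = 0; i < N; i++)
       for (j = 0; j < M; j++)
   the compiler passes NCOUNTS = 2 and COUNTS = { N, M }, drives the
   worksharing loop over the outer index from 0 to N - 1 through the
   usual *_next calls, and expresses the cross-iteration dependences in
   the body via GOMP_doacross_post / GOMP_doacross_wait on (i, j)
   coordinates; those entry points live in ordered.c, not in this
   file.  */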

static bool
gomp_loop_doacross_static_start (unsigned ncounts, long *counts,
				 long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (0))
    {
      gomp_loop_init (thr->ts.work_share, 0, counts[0], 1,
		      GFS_STATIC, chunk_size);
      gomp_doacross_init (ncounts, counts, chunk_size, 0);
      gomp_work_share_init_done ();
    }

  return !gomp_iter_static_next (istart, iend);
}

static bool
gomp_loop_doacross_dynamic_start (unsigned ncounts, long *counts,
				  long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (0))
    {
      gomp_loop_init (thr->ts.work_share, 0, counts[0], 1,
		      GFS_DYNAMIC, chunk_size);
      gomp_doacross_init (ncounts, counts, chunk_size, 0);
      gomp_work_share_init_done ();
    }

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_dynamic_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_doacross_guided_start (unsigned ncounts, long *counts,
				 long chunk_size, long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (0))
    {
      gomp_loop_init (thr->ts.work_share, 0, counts[0], 1,
		      GFS_GUIDED, chunk_size);
      gomp_doacross_init (ncounts, counts, chunk_size, 0);
      gomp_work_share_init_done ();
    }

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_guided_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_doacross_runtime_start (unsigned ncounts, long *counts,
				  long *istart, long *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var & ~GFS_MONOTONIC)
    {
    case GFS_STATIC:
      return gomp_loop_doacross_static_start (ncounts, counts,
					      icv->run_sched_chunk_size,
					      istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_doacross_dynamic_start (ncounts, counts,
					       icv->run_sched_chunk_size,
					       istart, iend);
    case GFS_GUIDED:
      return gomp_loop_doacross_guided_start (ncounts, counts,
					      icv->run_sched_chunk_size,
					      istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static), later on we could play with feedback
	 driven choice.  */
      return gomp_loop_doacross_static_start (ncounts, counts,
					      0, istart, iend);
    default:
      abort ();
    }
}

bool
GOMP_loop_doacross_start (unsigned ncounts, long *counts, long sched,
			  long chunk_size, long *istart, long *iend,
			  uintptr_t *reductions, void **mem)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (reductions)
    gomp_workshare_taskgroup_start ();
  if (gomp_work_share_start (0))
    {
      size_t extra = 0;
      if (mem)
	extra = (uintptr_t) *mem;
      sched = gomp_adjust_sched (sched, &chunk_size);
      gomp_loop_init (thr->ts.work_share, 0, counts[0], 1,
		      sched, chunk_size);
      gomp_doacross_init (ncounts, counts, chunk_size, extra);
      if (reductions)
	{
	  GOMP_taskgroup_reduction_register (reductions);
	  thr->task->taskgroup->workshare = true;
	  thr->ts.work_share->task_reductions = reductions;
	}
      gomp_work_share_init_done ();
    }
  else
    {
      if (reductions)
	{
	  uintptr_t *first_reductions = thr->ts.work_share->task_reductions;
	  gomp_workshare_task_reduction_register (reductions,
						  first_reductions);
	}
      sched = thr->ts.work_share->sched;
    }

  if (mem)
    *mem = thr->ts.work_share->doacross->extra;

  return ialias_call (GOMP_loop_runtime_next) (istart, iend);
}

/* The *_next routines are called when the thread completes processing of
   the iteration block currently assigned to it.  If the work-share
   construct is bound directly to a parallel construct, then the iteration
   bounds may have been set up before the parallel, in which case this
   may be the first iteration block for the thread.

   Returns true if there is work remaining to be performed; *ISTART and
   *IEND are filled with a new iteration block.  Returns false if all work
   has been assigned.  */

static bool
gomp_loop_static_next (long *istart, long *iend)
{
  return !gomp_iter_static_next (istart, iend);
}

static bool
gomp_loop_dynamic_next (long *istart, long *iend)
{
  bool ret;

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_dynamic_next (istart, iend);
#else
  struct gomp_thread *thr = gomp_thread ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_guided_next (long *istart, long *iend)
{
  bool ret;

#ifdef HAVE_SYNC_BUILTINS
  ret = gomp_iter_guided_next (istart, iend);
#else
  struct gomp_thread *thr = gomp_thread ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_runtime_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  switch (thr->ts.work_share->sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      return gomp_loop_static_next (istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_dynamic_next (istart, iend);
    case GFS_GUIDED:
      return gomp_loop_guided_next (istart, iend);
    default:
      abort ();
    }
}

/* The *_ordered_*_next routines are called when the thread completes
   processing of the iteration block currently assigned to it.

   Returns true if there is work remaining to be performed; *ISTART and
   *IEND are filled with a new iteration block.  Returns false if all work
   has been assigned.  */

static bool
gomp_loop_ordered_static_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  int test;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  test = gomp_iter_static_next (istart, iend);
  if (test >= 0)
    gomp_ordered_static_next ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return test == 0;
}

static bool
gomp_loop_ordered_dynamic_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_dynamic_next_locked (istart, iend);
  if (ret)
    gomp_ordered_next ();
  else
    gomp_ordered_last ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

static bool
gomp_loop_ordered_guided_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_guided_next_locked (istart, iend);
  if (ret)
    gomp_ordered_next ();
  else
    gomp_ordered_last ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

bool
GOMP_loop_ordered_runtime_next (long *istart, long *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  switch (thr->ts.work_share->sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      return gomp_loop_ordered_static_next (istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ordered_dynamic_next (istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ordered_guided_next (istart, iend);
    default:
      abort ();
    }
}

/* The GOMP_parallel_loop_* routines pre-initialize a work-share construct
   to avoid one synchronization once we get into the loop.  */
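
/* Roughly, a combined
     #pragma omp parallel for schedule(guided, 8)
   is expanded so that the encountering thread calls one of the routines
   below with an outlined function; because the work share is already
   initialized, every thread (including the caller, which runs fn
   directly) can go straight to the *_next calls.  fn, data and body are
   placeholder names for compiler-generated entities, and this is only a
   sketch of the calling convention:

     static void fn (void *data)
     {
       long istart, iend;
       while (GOMP_loop_guided_next (&istart, &iend))
	 for (long i = istart; i < iend; i++)
	   body (i);
       GOMP_loop_end_nowait ();
     }

     GOMP_parallel_loop_guided (fn, &data, 0, 0, n, 1, 8, 0);  */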

static void
gomp_parallel_loop_start (void (*fn) (void *), void *data,
			  unsigned num_threads, long start, long end,
			  long incr, enum gomp_schedule_type sched,
			  long chunk_size, unsigned int flags)
{
  struct gomp_team *team;

  num_threads = gomp_resolve_num_threads (num_threads, 0);
  team = gomp_new_team (num_threads);
  gomp_loop_init (&team->work_shares[0], start, end, incr, sched, chunk_size);
  gomp_team_start (fn, data, num_threads, flags, team, NULL);
}

void
GOMP_parallel_loop_static_start (void (*fn) (void *), void *data,
				 unsigned num_threads, long start, long end,
				 long incr, long chunk_size)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    GFS_STATIC, chunk_size, 0);
}

void
GOMP_parallel_loop_dynamic_start (void (*fn) (void *), void *data,
				  unsigned num_threads, long start, long end,
				  long incr, long chunk_size)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    GFS_DYNAMIC, chunk_size, 0);
}

void
GOMP_parallel_loop_guided_start (void (*fn) (void *), void *data,
				 unsigned num_threads, long start, long end,
				 long incr, long chunk_size)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    GFS_GUIDED, chunk_size, 0);
}

void
GOMP_parallel_loop_runtime_start (void (*fn) (void *), void *data,
				  unsigned num_threads, long start, long end,
				  long incr)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    icv->run_sched_var & ~GFS_MONOTONIC,
			    icv->run_sched_chunk_size, 0);
}

ialias_redirect (GOMP_parallel_end)

void
GOMP_parallel_loop_static (void (*fn) (void *), void *data,
			   unsigned num_threads, long start, long end,
			   long incr, long chunk_size, unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    GFS_STATIC, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}

void
GOMP_parallel_loop_dynamic (void (*fn) (void *), void *data,
			    unsigned num_threads, long start, long end,
			    long incr, long chunk_size, unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    GFS_DYNAMIC, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}

void
GOMP_parallel_loop_guided (void (*fn) (void *), void *data,
			  unsigned num_threads, long start, long end,
			  long incr, long chunk_size, unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    GFS_GUIDED, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}

void
GOMP_parallel_loop_runtime (void (*fn) (void *), void *data,
			    unsigned num_threads, long start, long end,
			    long incr, unsigned flags)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    icv->run_sched_var & ~GFS_MONOTONIC,
			    icv->run_sched_chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}

#ifdef HAVE_ATTRIBUTE_ALIAS
extern __typeof(GOMP_parallel_loop_dynamic) GOMP_parallel_loop_nonmonotonic_dynamic
	__attribute__((alias ("GOMP_parallel_loop_dynamic")));
extern __typeof(GOMP_parallel_loop_guided) GOMP_parallel_loop_nonmonotonic_guided
	__attribute__((alias ("GOMP_parallel_loop_guided")));
extern __typeof(GOMP_parallel_loop_runtime) GOMP_parallel_loop_nonmonotonic_runtime
	__attribute__((alias ("GOMP_parallel_loop_runtime")));
extern __typeof(GOMP_parallel_loop_runtime) GOMP_parallel_loop_maybe_nonmonotonic_runtime
	__attribute__((alias ("GOMP_parallel_loop_runtime")));
#else
void
GOMP_parallel_loop_nonmonotonic_dynamic (void (*fn) (void *), void *data,
					 unsigned num_threads, long start,
					 long end, long incr, long chunk_size,
					 unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    GFS_DYNAMIC, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}

void
GOMP_parallel_loop_nonmonotonic_guided (void (*fn) (void *), void *data,
					unsigned num_threads, long start,
					long end, long incr, long chunk_size,
					unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    GFS_GUIDED, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}

void
GOMP_parallel_loop_nonmonotonic_runtime (void (*fn) (void *), void *data,
					 unsigned num_threads, long start,
					 long end, long incr, unsigned flags)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    icv->run_sched_var & ~GFS_MONOTONIC,
			    icv->run_sched_chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}

void
GOMP_parallel_loop_maybe_nonmonotonic_runtime (void (*fn) (void *), void *data,
					       unsigned num_threads, long start,
					       long end, long incr,
					       unsigned flags)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    icv->run_sched_var & ~GFS_MONOTONIC,
			    icv->run_sched_chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}
#endif

/* The GOMP_loop_end* routines are called after the thread is told that
   all loop iterations are complete.  The first two versions synchronize
   all threads; the nowait version does not.  */

void
GOMP_loop_end (void)
{
  gomp_work_share_end ();
}

bool
GOMP_loop_end_cancel (void)
{
  return gomp_work_share_end_cancel ();
}

void
GOMP_loop_end_nowait (void)
{
  gomp_work_share_end_nowait ();
}


/* We use static functions above so that we're sure that the "runtime"
   function can defer to the proper routine without interposition.  We
   export the static function with a strong alias when possible, or with
   a wrapper function otherwise.  */

#ifdef HAVE_ATTRIBUTE_ALIAS
extern __typeof(gomp_loop_static_start) GOMP_loop_static_start
	__attribute__((alias ("gomp_loop_static_start")));
extern __typeof(gomp_loop_dynamic_start) GOMP_loop_dynamic_start
	__attribute__((alias ("gomp_loop_dynamic_start")));
extern __typeof(gomp_loop_guided_start) GOMP_loop_guided_start
	__attribute__((alias ("gomp_loop_guided_start")));
extern __typeof(gomp_loop_dynamic_start) GOMP_loop_nonmonotonic_dynamic_start
	__attribute__((alias ("gomp_loop_dynamic_start")));
extern __typeof(gomp_loop_guided_start) GOMP_loop_nonmonotonic_guided_start
	__attribute__((alias ("gomp_loop_guided_start")));
extern __typeof(GOMP_loop_runtime_start) GOMP_loop_nonmonotonic_runtime_start
	__attribute__((alias ("GOMP_loop_runtime_start")));
extern __typeof(GOMP_loop_runtime_start) GOMP_loop_maybe_nonmonotonic_runtime_start
	__attribute__((alias ("GOMP_loop_runtime_start")));

extern __typeof(gomp_loop_ordered_static_start) GOMP_loop_ordered_static_start
	__attribute__((alias ("gomp_loop_ordered_static_start")));
extern __typeof(gomp_loop_ordered_dynamic_start) GOMP_loop_ordered_dynamic_start
	__attribute__((alias ("gomp_loop_ordered_dynamic_start")));
extern __typeof(gomp_loop_ordered_guided_start) GOMP_loop_ordered_guided_start
	__attribute__((alias ("gomp_loop_ordered_guided_start")));

extern __typeof(gomp_loop_doacross_static_start) GOMP_loop_doacross_static_start
	__attribute__((alias ("gomp_loop_doacross_static_start")));
extern __typeof(gomp_loop_doacross_dynamic_start) GOMP_loop_doacross_dynamic_start
	__attribute__((alias ("gomp_loop_doacross_dynamic_start")));
extern __typeof(gomp_loop_doacross_guided_start) GOMP_loop_doacross_guided_start
	__attribute__((alias ("gomp_loop_doacross_guided_start")));

extern __typeof(gomp_loop_static_next) GOMP_loop_static_next
	__attribute__((alias ("gomp_loop_static_next")));
extern __typeof(gomp_loop_dynamic_next) GOMP_loop_dynamic_next
	__attribute__((alias ("gomp_loop_dynamic_next")));
extern __typeof(gomp_loop_guided_next) GOMP_loop_guided_next
	__attribute__((alias ("gomp_loop_guided_next")));
extern __typeof(gomp_loop_dynamic_next) GOMP_loop_nonmonotonic_dynamic_next
	__attribute__((alias ("gomp_loop_dynamic_next")));
extern __typeof(gomp_loop_guided_next) GOMP_loop_nonmonotonic_guided_next
	__attribute__((alias ("gomp_loop_guided_next")));
extern __typeof(GOMP_loop_runtime_next) GOMP_loop_nonmonotonic_runtime_next
	__attribute__((alias ("GOMP_loop_runtime_next")));
extern __typeof(GOMP_loop_runtime_next) GOMP_loop_maybe_nonmonotonic_runtime_next
	__attribute__((alias ("GOMP_loop_runtime_next")));

extern __typeof(gomp_loop_ordered_static_next) GOMP_loop_ordered_static_next
	__attribute__((alias ("gomp_loop_ordered_static_next")));
extern __typeof(gomp_loop_ordered_dynamic_next) GOMP_loop_ordered_dynamic_next
	__attribute__((alias ("gomp_loop_ordered_dynamic_next")));
extern __typeof(gomp_loop_ordered_guided_next) GOMP_loop_ordered_guided_next
	__attribute__((alias ("gomp_loop_ordered_guided_next")));
#else
bool
GOMP_loop_static_start (long start, long end, long incr, long chunk_size,
			long *istart, long *iend)
{
  return gomp_loop_static_start (start, end, incr, chunk_size, istart, iend);
}

bool
GOMP_loop_dynamic_start (long start, long end, long incr, long chunk_size,
			 long *istart, long *iend)
{
  return gomp_loop_dynamic_start (start, end, incr, chunk_size, istart, iend);
}

bool
GOMP_loop_guided_start (long start, long end, long incr, long chunk_size,
			long *istart, long *iend)
{
  return gomp_loop_guided_start (start, end, incr, chunk_size, istart, iend);
}

bool
GOMP_loop_nonmonotonic_dynamic_start (long start, long end, long incr,
				      long chunk_size, long *istart,
				      long *iend)
{
  return gomp_loop_dynamic_start (start, end, incr, chunk_size, istart, iend);
}

bool
GOMP_loop_nonmonotonic_guided_start (long start, long end, long incr,
				     long chunk_size, long *istart, long *iend)
{
  return gomp_loop_guided_start (start, end, incr, chunk_size, istart, iend);
}

bool
GOMP_loop_nonmonotonic_runtime_start (long start, long end, long incr,
				      long *istart, long *iend)
{
  return GOMP_loop_runtime_start (start, end, incr, istart, iend);
}

bool
GOMP_loop_maybe_nonmonotonic_runtime_start (long start, long end, long incr,
					    long *istart, long *iend)
{
  return GOMP_loop_runtime_start (start, end, incr, istart, iend);
}

bool
GOMP_loop_ordered_static_start (long start, long end, long incr,
				long chunk_size, long *istart, long *iend)
{
  return gomp_loop_ordered_static_start (start, end, incr, chunk_size,
					 istart, iend);
}

bool
GOMP_loop_ordered_dynamic_start (long start, long end, long incr,
				 long chunk_size, long *istart, long *iend)
{
  return gomp_loop_ordered_dynamic_start (start, end, incr, chunk_size,
					  istart, iend);
}

bool
GOMP_loop_ordered_guided_start (long start, long end, long incr,
				long chunk_size, long *istart, long *iend)
{
  return gomp_loop_ordered_guided_start (start, end, incr, chunk_size,
					 istart, iend);
}

bool
GOMP_loop_doacross_static_start (unsigned ncounts, long *counts,
				 long chunk_size, long *istart, long *iend)
{
  return gomp_loop_doacross_static_start (ncounts, counts, chunk_size,
					  istart, iend);
}

bool
GOMP_loop_doacross_dynamic_start (unsigned ncounts, long *counts,
				  long chunk_size, long *istart, long *iend)
{
  return gomp_loop_doacross_dynamic_start (ncounts, counts, chunk_size,
					   istart, iend);
}

bool
GOMP_loop_doacross_guided_start (unsigned ncounts, long *counts,
				 long chunk_size, long *istart, long *iend)
{
  return gomp_loop_doacross_guided_start (ncounts, counts, chunk_size,
					  istart, iend);
}

bool
GOMP_loop_static_next (long *istart, long *iend)
{
  return gomp_loop_static_next (istart, iend);
}

bool
GOMP_loop_dynamic_next (long *istart, long *iend)
{
  return gomp_loop_dynamic_next (istart, iend);
}

bool
GOMP_loop_guided_next (long *istart, long *iend)
{
  return gomp_loop_guided_next (istart, iend);
}

bool
GOMP_loop_nonmonotonic_dynamic_next (long *istart, long *iend)
{
  return gomp_loop_dynamic_next (istart, iend);
}

bool
GOMP_loop_nonmonotonic_guided_next (long *istart, long *iend)
{
  return gomp_loop_guided_next (istart, iend);
}

bool
GOMP_loop_nonmonotonic_runtime_next (long *istart, long *iend)
{
  return GOMP_loop_runtime_next (istart, iend);
}

bool
GOMP_loop_maybe_nonmonotonic_runtime_next (long *istart, long *iend)
{
  return GOMP_loop_runtime_next (istart, iend);
}

bool
GOMP_loop_ordered_static_next (long *istart, long *iend)
{
  return gomp_loop_ordered_static_next (istart, iend);
}

bool
GOMP_loop_ordered_dynamic_next (long *istart, long *iend)
{
  return gomp_loop_ordered_dynamic_next (istart, iend);
}

bool
GOMP_loop_ordered_guided_next (long *istart, long *iend)
{
  return gomp_loop_ordered_guided_next (istart, iend);
}
#endif