xref: /netbsd-src/external/gpl3/gcc.old/dist/libgomp/loop.c (revision 8feb0f0b7eaff0608f8350bbfa3098827b4bb91b)
1*8feb0f0bSmrg /* Copyright (C) 2005-2020 Free Software Foundation, Inc.
21debfc3dSmrg    Contributed by Richard Henderson <rth@redhat.com>.
31debfc3dSmrg 
41debfc3dSmrg    This file is part of the GNU Offloading and Multi Processing Library
51debfc3dSmrg    (libgomp).
61debfc3dSmrg 
71debfc3dSmrg    Libgomp is free software; you can redistribute it and/or modify it
81debfc3dSmrg    under the terms of the GNU General Public License as published by
91debfc3dSmrg    the Free Software Foundation; either version 3, or (at your option)
101debfc3dSmrg    any later version.
111debfc3dSmrg 
121debfc3dSmrg    Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
131debfc3dSmrg    WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
141debfc3dSmrg    FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
151debfc3dSmrg    more details.
161debfc3dSmrg 
171debfc3dSmrg    Under Section 7 of GPL version 3, you are granted additional
181debfc3dSmrg    permissions described in the GCC Runtime Library Exception, version
191debfc3dSmrg    3.1, as published by the Free Software Foundation.
201debfc3dSmrg 
211debfc3dSmrg    You should have received a copy of the GNU General Public License and
221debfc3dSmrg    a copy of the GCC Runtime Library Exception along with this program;
231debfc3dSmrg    see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
241debfc3dSmrg    <http://www.gnu.org/licenses/>.  */
251debfc3dSmrg 
261debfc3dSmrg /* This file handles the LOOP (FOR/DO) construct.  */
271debfc3dSmrg 
281debfc3dSmrg #include <limits.h>
291debfc3dSmrg #include <stdlib.h>
30c0a68be4Smrg #include <string.h>
311debfc3dSmrg #include "libgomp.h"
321debfc3dSmrg 
331debfc3dSmrg 
34c0a68be4Smrg ialias (GOMP_loop_runtime_next)
ialias_redirect(GOMP_taskgroup_reduction_register)35c0a68be4Smrg ialias_redirect (GOMP_taskgroup_reduction_register)
36c0a68be4Smrg 
371debfc3dSmrg /* Initialize the given work share construct from the given arguments.  */
381debfc3dSmrg 
/* Initialize the work share WS for a loop running from START to END in
   steps of INCR, using schedule SCHED with the given CHUNK_SIZE.  For
   GFS_DYNAMIC, additionally precompute ws->mode so that the lock-free
   path in the dynamic iterator can avoid per-chunk signed-overflow
   checks when it is provably safe to do so.  */

static inline void
gomp_loop_init (struct gomp_work_share *ws, long start, long end, long incr,
		enum gomp_schedule_type sched, long chunk_size)
{
  ws->sched = sched;
  ws->chunk_size = chunk_size;
  /* Canonicalize loops that have zero iterations to ->next == ->end.  */
  ws->end = ((incr > 0 && start > end) || (incr < 0 && start < end))
	    ? start : end;
  ws->incr = incr;
  ws->next = start;
  if (sched == GFS_DYNAMIC)
    {
      /* Pre-scale the chunk by the increment so it can be added
	 directly to ws->next when handing out chunks.  */
      ws->chunk_size *= incr;

#ifdef HAVE_SYNC_BUILTINS
      {
	/* For dynamic scheduling prepare things to make each iteration
	   faster.  A nonzero ws->mode means advancing the iterator can
	   never overflow a long, so overflow checks may be skipped.  */
	struct gomp_thread *thr = gomp_thread ();
	struct gomp_team *team = thr->ts.team;
	long nthreads = team ? team->nthreads : 1;

	if (__builtin_expect (incr > 0, 1))
	  {
	    /* Cheap overflow protection.  If either value needs the
	       upper half of long's bits, conservatively take the slow
	       path rather than reasoning precisely.  */
	    if (__builtin_expect ((nthreads | ws->chunk_size)
				  >= 1UL << (sizeof (long)
					     * __CHAR_BIT__ / 2 - 1), 0))
	      ws->mode = 0;
	    else
	      ws->mode = ws->end < (LONG_MAX
				    - (nthreads + 1) * ws->chunk_size);
	  }
	/* Cheap overflow protection.  */
	else if (__builtin_expect ((nthreads | -ws->chunk_size)
				   >= 1UL << (sizeof (long)
					      * __CHAR_BIT__ / 2 - 1), 0))
	  ws->mode = 0;
	else
	  ws->mode = ws->end > (nthreads + 1) * -ws->chunk_size - LONG_MAX;
      }
#endif
    }
}
841debfc3dSmrg 
851debfc3dSmrg /* The *_start routines are called when first encountering a loop construct
861debfc3dSmrg    that is not bound directly to a parallel construct.  The first thread
871debfc3dSmrg    that arrives will create the work-share construct; subsequent threads
881debfc3dSmrg    will see the construct exists and allocate work from it.
891debfc3dSmrg 
901debfc3dSmrg    START, END, INCR are the bounds of the loop; due to the restrictions of
911debfc3dSmrg    OpenMP, these values must be the same in every thread.  This is not
921debfc3dSmrg    verified (nor is it entirely verifiable, since START is not necessarily
931debfc3dSmrg    retained intact in the work-share data structure).  CHUNK_SIZE is the
941debfc3dSmrg    scheduling parameter; again this must be identical in all threads.
951debfc3dSmrg 
961debfc3dSmrg    Returns true if there's any work for this thread to perform.  If so,
971debfc3dSmrg    *ISTART and *IEND are filled with the bounds of the iteration block
981debfc3dSmrg    allocated to this thread.  Returns false if all work was assigned to
991debfc3dSmrg    other threads prior to this thread's arrival.  */
1001debfc3dSmrg 
1011debfc3dSmrg static bool
gomp_loop_static_start(long start,long end,long incr,long chunk_size,long * istart,long * iend)1021debfc3dSmrg gomp_loop_static_start (long start, long end, long incr, long chunk_size,
1031debfc3dSmrg 			long *istart, long *iend)
1041debfc3dSmrg {
1051debfc3dSmrg   struct gomp_thread *thr = gomp_thread ();
1061debfc3dSmrg 
1071debfc3dSmrg   thr->ts.static_trip = 0;
108c0a68be4Smrg   if (gomp_work_share_start (0))
1091debfc3dSmrg     {
1101debfc3dSmrg       gomp_loop_init (thr->ts.work_share, start, end, incr,
1111debfc3dSmrg 		      GFS_STATIC, chunk_size);
1121debfc3dSmrg       gomp_work_share_init_done ();
1131debfc3dSmrg     }
1141debfc3dSmrg 
1151debfc3dSmrg   return !gomp_iter_static_next (istart, iend);
1161debfc3dSmrg }
1171debfc3dSmrg 
1181debfc3dSmrg /* The current dynamic implementation is always monotonic.  The
1191debfc3dSmrg    entrypoints without nonmonotonic in them have to be always monotonic,
1201debfc3dSmrg    but the nonmonotonic ones could be changed to use work-stealing for
1211debfc3dSmrg    improved scalability.  */
1221debfc3dSmrg 
1231debfc3dSmrg static bool
gomp_loop_dynamic_start(long start,long end,long incr,long chunk_size,long * istart,long * iend)1241debfc3dSmrg gomp_loop_dynamic_start (long start, long end, long incr, long chunk_size,
1251debfc3dSmrg 			 long *istart, long *iend)
1261debfc3dSmrg {
1271debfc3dSmrg   struct gomp_thread *thr = gomp_thread ();
1281debfc3dSmrg   bool ret;
1291debfc3dSmrg 
130c0a68be4Smrg   if (gomp_work_share_start (0))
1311debfc3dSmrg     {
1321debfc3dSmrg       gomp_loop_init (thr->ts.work_share, start, end, incr,
1331debfc3dSmrg 		      GFS_DYNAMIC, chunk_size);
1341debfc3dSmrg       gomp_work_share_init_done ();
1351debfc3dSmrg     }
1361debfc3dSmrg 
1371debfc3dSmrg #ifdef HAVE_SYNC_BUILTINS
1381debfc3dSmrg   ret = gomp_iter_dynamic_next (istart, iend);
1391debfc3dSmrg #else
1401debfc3dSmrg   gomp_mutex_lock (&thr->ts.work_share->lock);
1411debfc3dSmrg   ret = gomp_iter_dynamic_next_locked (istart, iend);
1421debfc3dSmrg   gomp_mutex_unlock (&thr->ts.work_share->lock);
1431debfc3dSmrg #endif
1441debfc3dSmrg 
1451debfc3dSmrg   return ret;
1461debfc3dSmrg }
1471debfc3dSmrg 
1481debfc3dSmrg /* Similarly as for dynamic, though the question is how can the chunk sizes
1491debfc3dSmrg    be decreased without a central locking or atomics.  */
1501debfc3dSmrg 
1511debfc3dSmrg static bool
gomp_loop_guided_start(long start,long end,long incr,long chunk_size,long * istart,long * iend)1521debfc3dSmrg gomp_loop_guided_start (long start, long end, long incr, long chunk_size,
1531debfc3dSmrg 			long *istart, long *iend)
1541debfc3dSmrg {
1551debfc3dSmrg   struct gomp_thread *thr = gomp_thread ();
1561debfc3dSmrg   bool ret;
1571debfc3dSmrg 
158c0a68be4Smrg   if (gomp_work_share_start (0))
1591debfc3dSmrg     {
1601debfc3dSmrg       gomp_loop_init (thr->ts.work_share, start, end, incr,
1611debfc3dSmrg 		      GFS_GUIDED, chunk_size);
1621debfc3dSmrg       gomp_work_share_init_done ();
1631debfc3dSmrg     }
1641debfc3dSmrg 
1651debfc3dSmrg #ifdef HAVE_SYNC_BUILTINS
1661debfc3dSmrg   ret = gomp_iter_guided_next (istart, iend);
1671debfc3dSmrg #else
1681debfc3dSmrg   gomp_mutex_lock (&thr->ts.work_share->lock);
1691debfc3dSmrg   ret = gomp_iter_guided_next_locked (istart, iend);
1701debfc3dSmrg   gomp_mutex_unlock (&thr->ts.work_share->lock);
1711debfc3dSmrg #endif
1721debfc3dSmrg 
1731debfc3dSmrg   return ret;
1741debfc3dSmrg }
1751debfc3dSmrg 
1761debfc3dSmrg bool
GOMP_loop_runtime_start(long start,long end,long incr,long * istart,long * iend)1771debfc3dSmrg GOMP_loop_runtime_start (long start, long end, long incr,
1781debfc3dSmrg 			 long *istart, long *iend)
1791debfc3dSmrg {
1801debfc3dSmrg   struct gomp_task_icv *icv = gomp_icv (false);
181c0a68be4Smrg   switch (icv->run_sched_var & ~GFS_MONOTONIC)
1821debfc3dSmrg     {
1831debfc3dSmrg     case GFS_STATIC:
1841debfc3dSmrg       return gomp_loop_static_start (start, end, incr,
1851debfc3dSmrg 				     icv->run_sched_chunk_size,
1861debfc3dSmrg 				     istart, iend);
1871debfc3dSmrg     case GFS_DYNAMIC:
1881debfc3dSmrg       return gomp_loop_dynamic_start (start, end, incr,
1891debfc3dSmrg 				      icv->run_sched_chunk_size,
1901debfc3dSmrg 				      istart, iend);
1911debfc3dSmrg     case GFS_GUIDED:
1921debfc3dSmrg       return gomp_loop_guided_start (start, end, incr,
1931debfc3dSmrg 				     icv->run_sched_chunk_size,
1941debfc3dSmrg 				     istart, iend);
1951debfc3dSmrg     case GFS_AUTO:
1961debfc3dSmrg       /* For now map to schedule(static), later on we could play with feedback
1971debfc3dSmrg 	 driven choice.  */
1981debfc3dSmrg       return gomp_loop_static_start (start, end, incr, 0, istart, iend);
1991debfc3dSmrg     default:
2001debfc3dSmrg       abort ();
2011debfc3dSmrg     }
2021debfc3dSmrg }
2031debfc3dSmrg 
/* Resolve the schedule kind SCHED requested by a GOMP_loop*_start entry
   point into a concrete GFS_STATIC/GFS_DYNAMIC/GFS_GUIDED kind, reading
   the run-sched ICV and filling in *CHUNK_SIZE when a runtime schedule
   was requested.  The GFS_MONOTONIC modifier bit is stripped — the
   current implementations are all monotonic anyway.  */

static long
gomp_adjust_sched (long sched, long *chunk_size)
{
  sched &= ~GFS_MONOTONIC;
  switch (sched)
    {
    case GFS_STATIC:
    case GFS_DYNAMIC:
    case GFS_GUIDED:
      return sched;
    /* GFS_RUNTIME is used for runtime schedule without monotonic
       or nonmonotonic modifiers on the clause.
       GFS_RUNTIME|GFS_MONOTONIC for runtime schedule with monotonic
       modifier.  */
    case GFS_RUNTIME:
    /* GFS_AUTO is used for runtime schedule with nonmonotonic
       modifier.  */
    case GFS_AUTO:
      {
	struct gomp_task_icv *icv = gomp_icv (false);
	sched = icv->run_sched_var & ~GFS_MONOTONIC;
	switch (sched)
	  {
	  case GFS_STATIC:
	  case GFS_DYNAMIC:
	  case GFS_GUIDED:
	    *chunk_size = icv->run_sched_chunk_size;
	    break;
	  case GFS_AUTO:
	    /* auto currently maps to static with default chunking.  */
	    sched = GFS_STATIC;
	    *chunk_size = 0;
	    break;
	  default:
	    abort ();
	  }
	return sched;
      }
    default:
      abort ();
    }
}
245c0a68be4Smrg 
/* GOMP_loop_start: combined loop-start entry point (OpenMP 5.0 ABI).
   Handles optional task reductions (REDUCTIONS) and an optional
   per-work-share scratch area: on entry *MEM holds the requested size,
   on exit it holds the usable (zeroed, long-long-aligned) pointer.
   When ISTART is NULL the caller only wants the work share set up;
   otherwise the first iteration chunk is fetched via
   GOMP_loop_runtime_next.  */

bool
GOMP_loop_start (long start, long end, long incr, long sched,
		 long chunk_size, long *istart, long *iend,
		 uintptr_t *reductions, void **mem)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (reductions)
    gomp_workshare_taskgroup_start ();
  if (gomp_work_share_start (0))
    {
      /* First thread to arrive: initialize the work share.  */
      sched = gomp_adjust_sched (sched, &chunk_size);
      gomp_loop_init (thr->ts.work_share, start, end, incr,
		      sched, chunk_size);
      if (reductions)
	{
	  GOMP_taskgroup_reduction_register (reductions);
	  thr->task->taskgroup->workshare = true;
	  thr->ts.work_share->task_reductions = reductions;
	}
      if (mem)
	{
	  uintptr_t size = (uintptr_t) *mem;
	  /* Offset of the inline buffer at the tail of the work share,
	     rounded up to long long alignment.  */
#define INLINE_ORDERED_TEAM_IDS_OFF \
  ((offsetof (struct gomp_work_share, inline_ordered_team_ids)		\
    + __alignof__ (long long) - 1) & ~(__alignof__ (long long) - 1))
	  if (size > (sizeof (struct gomp_work_share)
		      - INLINE_ORDERED_TEAM_IDS_OFF))
	    /* Too big for the inline tail: heap-allocate zeroed storage
	       and publish it via ordered_team_ids so late arrivals can
	       find it.  */
	    *mem
	      = (void *) (thr->ts.work_share->ordered_team_ids
			  = gomp_malloc_cleared (size));
	  else
	    /* Small enough: reuse the tail of the work share itself.  */
	    *mem = memset (((char *) thr->ts.work_share)
			   + INLINE_ORDERED_TEAM_IDS_OFF, '\0', size);
	}
      gomp_work_share_init_done ();
    }
  else
    {
      /* Late arrival: the work share already exists.  */
      if (reductions)
	{
	  uintptr_t *first_reductions = thr->ts.work_share->task_reductions;
	  gomp_workshare_task_reduction_register (reductions,
						  first_reductions);
	}
      if (mem)
	{
	  /* Recompute the aligned scratch pointer the first thread
	     handed out (either the heap block or the aligned inline
	     tail).  */
	  if ((offsetof (struct gomp_work_share, inline_ordered_team_ids)
	       & (__alignof__ (long long) - 1)) == 0)
	    *mem = (void *) thr->ts.work_share->ordered_team_ids;
	  else
	    {
	      uintptr_t p = (uintptr_t) thr->ts.work_share->ordered_team_ids;
	      p += __alignof__ (long long) - 1;
	      p &= ~(__alignof__ (long long) - 1);
	      *mem = (void *) p;
	    }
	}
    }

  if (!istart)
    return true;
  return ialias_call (GOMP_loop_runtime_next) (istart, iend);
}
311c0a68be4Smrg 
3121debfc3dSmrg /* The *_ordered_*_start routines are similar.  The only difference is that
3131debfc3dSmrg    this work-share construct is initialized to expect an ORDERED section.  */
3141debfc3dSmrg 
3151debfc3dSmrg static bool
gomp_loop_ordered_static_start(long start,long end,long incr,long chunk_size,long * istart,long * iend)3161debfc3dSmrg gomp_loop_ordered_static_start (long start, long end, long incr,
3171debfc3dSmrg 				long chunk_size, long *istart, long *iend)
3181debfc3dSmrg {
3191debfc3dSmrg   struct gomp_thread *thr = gomp_thread ();
3201debfc3dSmrg 
3211debfc3dSmrg   thr->ts.static_trip = 0;
322c0a68be4Smrg   if (gomp_work_share_start (1))
3231debfc3dSmrg     {
3241debfc3dSmrg       gomp_loop_init (thr->ts.work_share, start, end, incr,
3251debfc3dSmrg 		      GFS_STATIC, chunk_size);
3261debfc3dSmrg       gomp_ordered_static_init ();
3271debfc3dSmrg       gomp_work_share_init_done ();
3281debfc3dSmrg     }
3291debfc3dSmrg 
3301debfc3dSmrg   return !gomp_iter_static_next (istart, iend);
3311debfc3dSmrg }
3321debfc3dSmrg 
3331debfc3dSmrg static bool
gomp_loop_ordered_dynamic_start(long start,long end,long incr,long chunk_size,long * istart,long * iend)3341debfc3dSmrg gomp_loop_ordered_dynamic_start (long start, long end, long incr,
3351debfc3dSmrg 				 long chunk_size, long *istart, long *iend)
3361debfc3dSmrg {
3371debfc3dSmrg   struct gomp_thread *thr = gomp_thread ();
3381debfc3dSmrg   bool ret;
3391debfc3dSmrg 
340c0a68be4Smrg   if (gomp_work_share_start (1))
3411debfc3dSmrg     {
3421debfc3dSmrg       gomp_loop_init (thr->ts.work_share, start, end, incr,
3431debfc3dSmrg 		      GFS_DYNAMIC, chunk_size);
3441debfc3dSmrg       gomp_mutex_lock (&thr->ts.work_share->lock);
3451debfc3dSmrg       gomp_work_share_init_done ();
3461debfc3dSmrg     }
3471debfc3dSmrg   else
3481debfc3dSmrg     gomp_mutex_lock (&thr->ts.work_share->lock);
3491debfc3dSmrg 
3501debfc3dSmrg   ret = gomp_iter_dynamic_next_locked (istart, iend);
3511debfc3dSmrg   if (ret)
3521debfc3dSmrg     gomp_ordered_first ();
3531debfc3dSmrg   gomp_mutex_unlock (&thr->ts.work_share->lock);
3541debfc3dSmrg 
3551debfc3dSmrg   return ret;
3561debfc3dSmrg }
3571debfc3dSmrg 
3581debfc3dSmrg static bool
gomp_loop_ordered_guided_start(long start,long end,long incr,long chunk_size,long * istart,long * iend)3591debfc3dSmrg gomp_loop_ordered_guided_start (long start, long end, long incr,
3601debfc3dSmrg 				long chunk_size, long *istart, long *iend)
3611debfc3dSmrg {
3621debfc3dSmrg   struct gomp_thread *thr = gomp_thread ();
3631debfc3dSmrg   bool ret;
3641debfc3dSmrg 
365c0a68be4Smrg   if (gomp_work_share_start (1))
3661debfc3dSmrg     {
3671debfc3dSmrg       gomp_loop_init (thr->ts.work_share, start, end, incr,
3681debfc3dSmrg 		      GFS_GUIDED, chunk_size);
3691debfc3dSmrg       gomp_mutex_lock (&thr->ts.work_share->lock);
3701debfc3dSmrg       gomp_work_share_init_done ();
3711debfc3dSmrg     }
3721debfc3dSmrg   else
3731debfc3dSmrg     gomp_mutex_lock (&thr->ts.work_share->lock);
3741debfc3dSmrg 
3751debfc3dSmrg   ret = gomp_iter_guided_next_locked (istart, iend);
3761debfc3dSmrg   if (ret)
3771debfc3dSmrg     gomp_ordered_first ();
3781debfc3dSmrg   gomp_mutex_unlock (&thr->ts.work_share->lock);
3791debfc3dSmrg 
3801debfc3dSmrg   return ret;
3811debfc3dSmrg }
3821debfc3dSmrg 
3831debfc3dSmrg bool
GOMP_loop_ordered_runtime_start(long start,long end,long incr,long * istart,long * iend)3841debfc3dSmrg GOMP_loop_ordered_runtime_start (long start, long end, long incr,
3851debfc3dSmrg 				 long *istart, long *iend)
3861debfc3dSmrg {
3871debfc3dSmrg   struct gomp_task_icv *icv = gomp_icv (false);
388c0a68be4Smrg   switch (icv->run_sched_var & ~GFS_MONOTONIC)
3891debfc3dSmrg     {
3901debfc3dSmrg     case GFS_STATIC:
3911debfc3dSmrg       return gomp_loop_ordered_static_start (start, end, incr,
3921debfc3dSmrg 					     icv->run_sched_chunk_size,
3931debfc3dSmrg 					     istart, iend);
3941debfc3dSmrg     case GFS_DYNAMIC:
3951debfc3dSmrg       return gomp_loop_ordered_dynamic_start (start, end, incr,
3961debfc3dSmrg 					      icv->run_sched_chunk_size,
3971debfc3dSmrg 					      istart, iend);
3981debfc3dSmrg     case GFS_GUIDED:
3991debfc3dSmrg       return gomp_loop_ordered_guided_start (start, end, incr,
4001debfc3dSmrg 					     icv->run_sched_chunk_size,
4011debfc3dSmrg 					     istart, iend);
4021debfc3dSmrg     case GFS_AUTO:
4031debfc3dSmrg       /* For now map to schedule(static), later on we could play with feedback
4041debfc3dSmrg 	 driven choice.  */
4051debfc3dSmrg       return gomp_loop_ordered_static_start (start, end, incr,
4061debfc3dSmrg 					     0, istart, iend);
4071debfc3dSmrg     default:
4081debfc3dSmrg       abort ();
4091debfc3dSmrg     }
4101debfc3dSmrg }
4111debfc3dSmrg 
/* GOMP_loop_ordered_start: combined ordered loop-start entry point
   (OpenMP 5.0 ABI).  Like GOMP_loop_start but the work share expects an
   ORDERED section.  *MEM, if non-NULL, requests extra bytes placed after
   the per-thread ordered team id array and receives the aligned pointer.
   Returns true iff this thread received a first iteration block.  */

bool
GOMP_loop_ordered_start (long start, long end, long incr, long sched,
			 long chunk_size, long *istart, long *iend,
			 uintptr_t *reductions, void **mem)
{
  struct gomp_thread *thr = gomp_thread ();
  size_t ordered = 1;
  bool ret;

  thr->ts.static_trip = 0;
  if (reductions)
    gomp_workshare_taskgroup_start ();
  if (mem)
    /* Encode the extra storage request in the ordered count passed to
       gomp_work_share_start.  */
    ordered += (uintptr_t) *mem;
  if (gomp_work_share_start (ordered))
    {
      sched = gomp_adjust_sched (sched, &chunk_size);
      gomp_loop_init (thr->ts.work_share, start, end, incr,
		      sched, chunk_size);
      if (reductions)
	{
	  GOMP_taskgroup_reduction_register (reductions);
	  thr->task->taskgroup->workshare = true;
	  thr->ts.work_share->task_reductions = reductions;
	}
      if (sched == GFS_STATIC)
	gomp_ordered_static_init ();
      else
	/* Non-static schedules claim the first chunk under the lock;
	   acquire it before publishing the work share.  */
	gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    {
      if (reductions)
	{
	  uintptr_t *first_reductions = thr->ts.work_share->task_reductions;
	  gomp_workshare_task_reduction_register (reductions,
						  first_reductions);
	}
      /* Use the schedule chosen by the initializing thread, which may
	 differ from our SCHED argument after runtime adjustment.  */
      sched = thr->ts.work_share->sched;
      if (sched != GFS_STATIC)
	gomp_mutex_lock (&thr->ts.work_share->lock);
    }

  if (mem)
    {
      /* The extra storage lives just past the team id array, rounded up
	 to long long alignment.  */
      uintptr_t p
	= (uintptr_t) (thr->ts.work_share->ordered_team_ids
		       + (thr->ts.team ? thr->ts.team->nthreads : 1));
      p += __alignof__ (long long) - 1;
      p &= ~(__alignof__ (long long) - 1);
      *mem = (void *) p;
    }

  switch (sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      /* Static: no lock held; the static iterator is lock-free.  */
      return !gomp_iter_static_next (istart, iend);
    case GFS_DYNAMIC:
      ret = gomp_iter_dynamic_next_locked (istart, iend);
      break;
    case GFS_GUIDED:
      ret = gomp_iter_guided_next_locked (istart, iend);
      break;
    default:
      abort ();
    }

  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);
  return ret;
}
486c0a68be4Smrg 
4871debfc3dSmrg /* The *_doacross_*_start routines are similar.  The only difference is that
4881debfc3dSmrg    this work-share construct is initialized to expect an ORDERED(N) - DOACROSS
4891debfc3dSmrg    section, and the worksharing loop iterates always from 0 to COUNTS[0] - 1
4901debfc3dSmrg    and other COUNTS array elements tell the library number of iterations
4911debfc3dSmrg    in the ordered inner loops.  */
4921debfc3dSmrg 
4931debfc3dSmrg static bool
gomp_loop_doacross_static_start(unsigned ncounts,long * counts,long chunk_size,long * istart,long * iend)4941debfc3dSmrg gomp_loop_doacross_static_start (unsigned ncounts, long *counts,
4951debfc3dSmrg 				 long chunk_size, long *istart, long *iend)
4961debfc3dSmrg {
4971debfc3dSmrg   struct gomp_thread *thr = gomp_thread ();
4981debfc3dSmrg 
4991debfc3dSmrg   thr->ts.static_trip = 0;
500c0a68be4Smrg   if (gomp_work_share_start (0))
5011debfc3dSmrg     {
5021debfc3dSmrg       gomp_loop_init (thr->ts.work_share, 0, counts[0], 1,
5031debfc3dSmrg 		      GFS_STATIC, chunk_size);
504c0a68be4Smrg       gomp_doacross_init (ncounts, counts, chunk_size, 0);
5051debfc3dSmrg       gomp_work_share_init_done ();
5061debfc3dSmrg     }
5071debfc3dSmrg 
5081debfc3dSmrg   return !gomp_iter_static_next (istart, iend);
5091debfc3dSmrg }
5101debfc3dSmrg 
5111debfc3dSmrg static bool
gomp_loop_doacross_dynamic_start(unsigned ncounts,long * counts,long chunk_size,long * istart,long * iend)5121debfc3dSmrg gomp_loop_doacross_dynamic_start (unsigned ncounts, long *counts,
5131debfc3dSmrg 				  long chunk_size, long *istart, long *iend)
5141debfc3dSmrg {
5151debfc3dSmrg   struct gomp_thread *thr = gomp_thread ();
5161debfc3dSmrg   bool ret;
5171debfc3dSmrg 
518c0a68be4Smrg   if (gomp_work_share_start (0))
5191debfc3dSmrg     {
5201debfc3dSmrg       gomp_loop_init (thr->ts.work_share, 0, counts[0], 1,
5211debfc3dSmrg 		      GFS_DYNAMIC, chunk_size);
522c0a68be4Smrg       gomp_doacross_init (ncounts, counts, chunk_size, 0);
5231debfc3dSmrg       gomp_work_share_init_done ();
5241debfc3dSmrg     }
5251debfc3dSmrg 
5261debfc3dSmrg #ifdef HAVE_SYNC_BUILTINS
5271debfc3dSmrg   ret = gomp_iter_dynamic_next (istart, iend);
5281debfc3dSmrg #else
5291debfc3dSmrg   gomp_mutex_lock (&thr->ts.work_share->lock);
5301debfc3dSmrg   ret = gomp_iter_dynamic_next_locked (istart, iend);
5311debfc3dSmrg   gomp_mutex_unlock (&thr->ts.work_share->lock);
5321debfc3dSmrg #endif
5331debfc3dSmrg 
5341debfc3dSmrg   return ret;
5351debfc3dSmrg }
5361debfc3dSmrg 
5371debfc3dSmrg static bool
gomp_loop_doacross_guided_start(unsigned ncounts,long * counts,long chunk_size,long * istart,long * iend)5381debfc3dSmrg gomp_loop_doacross_guided_start (unsigned ncounts, long *counts,
5391debfc3dSmrg 				 long chunk_size, long *istart, long *iend)
5401debfc3dSmrg {
5411debfc3dSmrg   struct gomp_thread *thr = gomp_thread ();
5421debfc3dSmrg   bool ret;
5431debfc3dSmrg 
544c0a68be4Smrg   if (gomp_work_share_start (0))
5451debfc3dSmrg     {
5461debfc3dSmrg       gomp_loop_init (thr->ts.work_share, 0, counts[0], 1,
5471debfc3dSmrg 		      GFS_GUIDED, chunk_size);
548c0a68be4Smrg       gomp_doacross_init (ncounts, counts, chunk_size, 0);
5491debfc3dSmrg       gomp_work_share_init_done ();
5501debfc3dSmrg     }
5511debfc3dSmrg 
5521debfc3dSmrg #ifdef HAVE_SYNC_BUILTINS
5531debfc3dSmrg   ret = gomp_iter_guided_next (istart, iend);
5541debfc3dSmrg #else
5551debfc3dSmrg   gomp_mutex_lock (&thr->ts.work_share->lock);
5561debfc3dSmrg   ret = gomp_iter_guided_next_locked (istart, iend);
5571debfc3dSmrg   gomp_mutex_unlock (&thr->ts.work_share->lock);
5581debfc3dSmrg #endif
5591debfc3dSmrg 
5601debfc3dSmrg   return ret;
5611debfc3dSmrg }
5621debfc3dSmrg 
5631debfc3dSmrg bool
GOMP_loop_doacross_runtime_start(unsigned ncounts,long * counts,long * istart,long * iend)5641debfc3dSmrg GOMP_loop_doacross_runtime_start (unsigned ncounts, long *counts,
5651debfc3dSmrg 				  long *istart, long *iend)
5661debfc3dSmrg {
5671debfc3dSmrg   struct gomp_task_icv *icv = gomp_icv (false);
568c0a68be4Smrg   switch (icv->run_sched_var & ~GFS_MONOTONIC)
5691debfc3dSmrg     {
5701debfc3dSmrg     case GFS_STATIC:
5711debfc3dSmrg       return gomp_loop_doacross_static_start (ncounts, counts,
5721debfc3dSmrg 					      icv->run_sched_chunk_size,
5731debfc3dSmrg 					      istart, iend);
5741debfc3dSmrg     case GFS_DYNAMIC:
5751debfc3dSmrg       return gomp_loop_doacross_dynamic_start (ncounts, counts,
5761debfc3dSmrg 					       icv->run_sched_chunk_size,
5771debfc3dSmrg 					       istart, iend);
5781debfc3dSmrg     case GFS_GUIDED:
5791debfc3dSmrg       return gomp_loop_doacross_guided_start (ncounts, counts,
5801debfc3dSmrg 					      icv->run_sched_chunk_size,
5811debfc3dSmrg 					      istart, iend);
5821debfc3dSmrg     case GFS_AUTO:
5831debfc3dSmrg       /* For now map to schedule(static), later on we could play with feedback
5841debfc3dSmrg 	 driven choice.  */
5851debfc3dSmrg       return gomp_loop_doacross_static_start (ncounts, counts,
5861debfc3dSmrg 					      0, istart, iend);
5871debfc3dSmrg     default:
5881debfc3dSmrg       abort ();
5891debfc3dSmrg     }
5901debfc3dSmrg }
5911debfc3dSmrg 
/* GOMP_loop_doacross_start: combined doacross loop-start entry point
   (OpenMP 5.0 ABI).  The loop always iterates 0 .. COUNTS[0] - 1;
   COUNTS[1 .. NCOUNTS-1] give the iteration counts of the ordered inner
   loops.  *MEM, if non-NULL, requests extra bytes inside the doacross
   allocation and receives a pointer to them.  Returns true iff this
   thread received a first iteration block.  */

bool
GOMP_loop_doacross_start (unsigned ncounts, long *counts, long sched,
			  long chunk_size, long *istart, long *iend,
			  uintptr_t *reductions, void **mem)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (reductions)
    gomp_workshare_taskgroup_start ();
  if (gomp_work_share_start (0))
    {
      size_t extra = 0;
      if (mem)
	extra = (uintptr_t) *mem;
      sched = gomp_adjust_sched (sched, &chunk_size);
      gomp_loop_init (thr->ts.work_share, 0, counts[0], 1,
		      sched, chunk_size);
      gomp_doacross_init (ncounts, counts, chunk_size, extra);
      if (reductions)
	{
	  GOMP_taskgroup_reduction_register (reductions);
	  thr->task->taskgroup->workshare = true;
	  thr->ts.work_share->task_reductions = reductions;
	}
      gomp_work_share_init_done ();
    }
  else
    {
      if (reductions)
	{
	  uintptr_t *first_reductions = thr->ts.work_share->task_reductions;
	  gomp_workshare_task_reduction_register (reductions,
						  first_reductions);
	}
      /* Use the schedule the initializing thread settled on.  */
      sched = thr->ts.work_share->sched;
    }

  if (mem)
    /* Hand back the extra storage carved out by gomp_doacross_init.  */
    *mem = thr->ts.work_share->doacross->extra;

  return ialias_call (GOMP_loop_runtime_next) (istart, iend);
}
635c0a68be4Smrg 
6361debfc3dSmrg /* The *_next routines are called when the thread completes processing of
6371debfc3dSmrg    the iteration block currently assigned to it.  If the work-share
6381debfc3dSmrg    construct is bound directly to a parallel construct, then the iteration
6391debfc3dSmrg    bounds may have been set up before the parallel.  In which case, this
6401debfc3dSmrg    may be the first iteration for the thread.
6411debfc3dSmrg 
6421debfc3dSmrg    Returns true if there is work remaining to be performed; *ISTART and
6431debfc3dSmrg    *IEND are filled with a new iteration block.  Returns false if all work
6441debfc3dSmrg    has been assigned.  */
6451debfc3dSmrg 
6461debfc3dSmrg static bool
gomp_loop_static_next(long * istart,long * iend)6471debfc3dSmrg gomp_loop_static_next (long *istart, long *iend)
6481debfc3dSmrg {
6491debfc3dSmrg   return !gomp_iter_static_next (istart, iend);
6501debfc3dSmrg }
6511debfc3dSmrg 
6521debfc3dSmrg static bool
gomp_loop_dynamic_next(long * istart,long * iend)6531debfc3dSmrg gomp_loop_dynamic_next (long *istart, long *iend)
6541debfc3dSmrg {
6551debfc3dSmrg   bool ret;
6561debfc3dSmrg 
6571debfc3dSmrg #ifdef HAVE_SYNC_BUILTINS
6581debfc3dSmrg   ret = gomp_iter_dynamic_next (istart, iend);
6591debfc3dSmrg #else
6601debfc3dSmrg   struct gomp_thread *thr = gomp_thread ();
6611debfc3dSmrg   gomp_mutex_lock (&thr->ts.work_share->lock);
6621debfc3dSmrg   ret = gomp_iter_dynamic_next_locked (istart, iend);
6631debfc3dSmrg   gomp_mutex_unlock (&thr->ts.work_share->lock);
6641debfc3dSmrg #endif
6651debfc3dSmrg 
6661debfc3dSmrg   return ret;
6671debfc3dSmrg }
6681debfc3dSmrg 
6691debfc3dSmrg static bool
gomp_loop_guided_next(long * istart,long * iend)6701debfc3dSmrg gomp_loop_guided_next (long *istart, long *iend)
6711debfc3dSmrg {
6721debfc3dSmrg   bool ret;
6731debfc3dSmrg 
6741debfc3dSmrg #ifdef HAVE_SYNC_BUILTINS
6751debfc3dSmrg   ret = gomp_iter_guided_next (istart, iend);
6761debfc3dSmrg #else
6771debfc3dSmrg   struct gomp_thread *thr = gomp_thread ();
6781debfc3dSmrg   gomp_mutex_lock (&thr->ts.work_share->lock);
6791debfc3dSmrg   ret = gomp_iter_guided_next_locked (istart, iend);
6801debfc3dSmrg   gomp_mutex_unlock (&thr->ts.work_share->lock);
6811debfc3dSmrg #endif
6821debfc3dSmrg 
6831debfc3dSmrg   return ret;
6841debfc3dSmrg }
6851debfc3dSmrg 
6861debfc3dSmrg bool
GOMP_loop_runtime_next(long * istart,long * iend)6871debfc3dSmrg GOMP_loop_runtime_next (long *istart, long *iend)
6881debfc3dSmrg {
6891debfc3dSmrg   struct gomp_thread *thr = gomp_thread ();
6901debfc3dSmrg 
6911debfc3dSmrg   switch (thr->ts.work_share->sched)
6921debfc3dSmrg     {
6931debfc3dSmrg     case GFS_STATIC:
6941debfc3dSmrg     case GFS_AUTO:
6951debfc3dSmrg       return gomp_loop_static_next (istart, iend);
6961debfc3dSmrg     case GFS_DYNAMIC:
6971debfc3dSmrg       return gomp_loop_dynamic_next (istart, iend);
6981debfc3dSmrg     case GFS_GUIDED:
6991debfc3dSmrg       return gomp_loop_guided_next (istart, iend);
7001debfc3dSmrg     default:
7011debfc3dSmrg       abort ();
7021debfc3dSmrg     }
7031debfc3dSmrg }
7041debfc3dSmrg 
7051debfc3dSmrg /* The *_ordered_*_next routines are called when the thread completes
7061debfc3dSmrg    processing of the iteration block currently assigned to it.
7071debfc3dSmrg 
7081debfc3dSmrg    Returns true if there is work remaining to be performed; *ISTART and
7091debfc3dSmrg    *IEND are filled with a new iteration block.  Returns false if all work
7101debfc3dSmrg    has been assigned.  */
7111debfc3dSmrg 
7121debfc3dSmrg static bool
gomp_loop_ordered_static_next(long * istart,long * iend)7131debfc3dSmrg gomp_loop_ordered_static_next (long *istart, long *iend)
7141debfc3dSmrg {
7151debfc3dSmrg   struct gomp_thread *thr = gomp_thread ();
7161debfc3dSmrg   int test;
7171debfc3dSmrg 
7181debfc3dSmrg   gomp_ordered_sync ();
7191debfc3dSmrg   gomp_mutex_lock (&thr->ts.work_share->lock);
7201debfc3dSmrg   test = gomp_iter_static_next (istart, iend);
7211debfc3dSmrg   if (test >= 0)
7221debfc3dSmrg     gomp_ordered_static_next ();
7231debfc3dSmrg   gomp_mutex_unlock (&thr->ts.work_share->lock);
7241debfc3dSmrg 
7251debfc3dSmrg   return test == 0;
7261debfc3dSmrg }
7271debfc3dSmrg 
7281debfc3dSmrg static bool
gomp_loop_ordered_dynamic_next(long * istart,long * iend)7291debfc3dSmrg gomp_loop_ordered_dynamic_next (long *istart, long *iend)
7301debfc3dSmrg {
7311debfc3dSmrg   struct gomp_thread *thr = gomp_thread ();
7321debfc3dSmrg   bool ret;
7331debfc3dSmrg 
7341debfc3dSmrg   gomp_ordered_sync ();
7351debfc3dSmrg   gomp_mutex_lock (&thr->ts.work_share->lock);
7361debfc3dSmrg   ret = gomp_iter_dynamic_next_locked (istart, iend);
7371debfc3dSmrg   if (ret)
7381debfc3dSmrg     gomp_ordered_next ();
7391debfc3dSmrg   else
7401debfc3dSmrg     gomp_ordered_last ();
7411debfc3dSmrg   gomp_mutex_unlock (&thr->ts.work_share->lock);
7421debfc3dSmrg 
7431debfc3dSmrg   return ret;
7441debfc3dSmrg }
7451debfc3dSmrg 
7461debfc3dSmrg static bool
gomp_loop_ordered_guided_next(long * istart,long * iend)7471debfc3dSmrg gomp_loop_ordered_guided_next (long *istart, long *iend)
7481debfc3dSmrg {
7491debfc3dSmrg   struct gomp_thread *thr = gomp_thread ();
7501debfc3dSmrg   bool ret;
7511debfc3dSmrg 
7521debfc3dSmrg   gomp_ordered_sync ();
7531debfc3dSmrg   gomp_mutex_lock (&thr->ts.work_share->lock);
7541debfc3dSmrg   ret = gomp_iter_guided_next_locked (istart, iend);
7551debfc3dSmrg   if (ret)
7561debfc3dSmrg     gomp_ordered_next ();
7571debfc3dSmrg   else
7581debfc3dSmrg     gomp_ordered_last ();
7591debfc3dSmrg   gomp_mutex_unlock (&thr->ts.work_share->lock);
7601debfc3dSmrg 
7611debfc3dSmrg   return ret;
7621debfc3dSmrg }
7631debfc3dSmrg 
7641debfc3dSmrg bool
GOMP_loop_ordered_runtime_next(long * istart,long * iend)7651debfc3dSmrg GOMP_loop_ordered_runtime_next (long *istart, long *iend)
7661debfc3dSmrg {
7671debfc3dSmrg   struct gomp_thread *thr = gomp_thread ();
7681debfc3dSmrg 
7691debfc3dSmrg   switch (thr->ts.work_share->sched)
7701debfc3dSmrg     {
7711debfc3dSmrg     case GFS_STATIC:
7721debfc3dSmrg     case GFS_AUTO:
7731debfc3dSmrg       return gomp_loop_ordered_static_next (istart, iend);
7741debfc3dSmrg     case GFS_DYNAMIC:
7751debfc3dSmrg       return gomp_loop_ordered_dynamic_next (istart, iend);
7761debfc3dSmrg     case GFS_GUIDED:
7771debfc3dSmrg       return gomp_loop_ordered_guided_next (istart, iend);
7781debfc3dSmrg     default:
7791debfc3dSmrg       abort ();
7801debfc3dSmrg     }
7811debfc3dSmrg }
7821debfc3dSmrg 
7831debfc3dSmrg /* The GOMP_parallel_loop_* routines pre-initialize a work-share construct
7841debfc3dSmrg    to avoid one synchronization once we get into the loop.  */
7851debfc3dSmrg 
7861debfc3dSmrg static void
gomp_parallel_loop_start(void (* fn)(void *),void * data,unsigned num_threads,long start,long end,long incr,enum gomp_schedule_type sched,long chunk_size,unsigned int flags)7871debfc3dSmrg gomp_parallel_loop_start (void (*fn) (void *), void *data,
7881debfc3dSmrg 			  unsigned num_threads, long start, long end,
7891debfc3dSmrg 			  long incr, enum gomp_schedule_type sched,
7901debfc3dSmrg 			  long chunk_size, unsigned int flags)
7911debfc3dSmrg {
7921debfc3dSmrg   struct gomp_team *team;
7931debfc3dSmrg 
7941debfc3dSmrg   num_threads = gomp_resolve_num_threads (num_threads, 0);
7951debfc3dSmrg   team = gomp_new_team (num_threads);
7961debfc3dSmrg   gomp_loop_init (&team->work_shares[0], start, end, incr, sched, chunk_size);
797c0a68be4Smrg   gomp_team_start (fn, data, num_threads, flags, team, NULL);
7981debfc3dSmrg }
7991debfc3dSmrg 
/* Start a parallel team with the loop work share pre-initialized for a
   static schedule.  */
void
GOMP_parallel_loop_static_start (void (*fn) (void *), void *data,
				 unsigned num_threads, long start, long end,
				 long incr, long chunk_size)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    GFS_STATIC, chunk_size, 0);
}
8081debfc3dSmrg 
/* Start a parallel team with the loop work share pre-initialized for a
   dynamic schedule.  */
void
GOMP_parallel_loop_dynamic_start (void (*fn) (void *), void *data,
				  unsigned num_threads, long start, long end,
				  long incr, long chunk_size)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    GFS_DYNAMIC, chunk_size, 0);
}
8171debfc3dSmrg 
/* Start a parallel team with the loop work share pre-initialized for a
   guided schedule.  */
void
GOMP_parallel_loop_guided_start (void (*fn) (void *), void *data,
				 unsigned num_threads, long start, long end,
				 long incr, long chunk_size)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    GFS_GUIDED, chunk_size, 0);
}
8261debfc3dSmrg 
8271debfc3dSmrg void
GOMP_parallel_loop_runtime_start(void (* fn)(void *),void * data,unsigned num_threads,long start,long end,long incr)8281debfc3dSmrg GOMP_parallel_loop_runtime_start (void (*fn) (void *), void *data,
8291debfc3dSmrg 				  unsigned num_threads, long start, long end,
8301debfc3dSmrg 				  long incr)
8311debfc3dSmrg {
8321debfc3dSmrg   struct gomp_task_icv *icv = gomp_icv (false);
8331debfc3dSmrg   gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
834c0a68be4Smrg 			    icv->run_sched_var & ~GFS_MONOTONIC,
835c0a68be4Smrg 			    icv->run_sched_chunk_size, 0);
8361debfc3dSmrg }
8371debfc3dSmrg 
ialias_redirect(GOMP_parallel_end)8381debfc3dSmrg ialias_redirect (GOMP_parallel_end)
8391debfc3dSmrg 
/* Combined parallel-loop construct, static schedule: start the team,
   run FN in the calling thread, then join via GOMP_parallel_end.  */
void
GOMP_parallel_loop_static (void (*fn) (void *), void *data,
			   unsigned num_threads, long start, long end,
			   long incr, long chunk_size, unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    GFS_STATIC, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}
8501debfc3dSmrg 
/* Combined parallel-loop construct, dynamic schedule: start the team,
   run FN in the calling thread, then join via GOMP_parallel_end.  */
void
GOMP_parallel_loop_dynamic (void (*fn) (void *), void *data,
			    unsigned num_threads, long start, long end,
			    long incr, long chunk_size, unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    GFS_DYNAMIC, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}
8611debfc3dSmrg 
/* Combined parallel-loop construct, guided schedule: start the team,
   run FN in the calling thread, then join via GOMP_parallel_end.  */
void
GOMP_parallel_loop_guided (void (*fn) (void *), void *data,
			  unsigned num_threads, long start, long end,
			  long incr, long chunk_size, unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    GFS_GUIDED, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}
8721debfc3dSmrg 
873c0a68be4Smrg void
GOMP_parallel_loop_runtime(void (* fn)(void *),void * data,unsigned num_threads,long start,long end,long incr,unsigned flags)874c0a68be4Smrg GOMP_parallel_loop_runtime (void (*fn) (void *), void *data,
875c0a68be4Smrg 			    unsigned num_threads, long start, long end,
876c0a68be4Smrg 			    long incr, unsigned flags)
877c0a68be4Smrg {
878c0a68be4Smrg   struct gomp_task_icv *icv = gomp_icv (false);
879c0a68be4Smrg   gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
880c0a68be4Smrg 			    icv->run_sched_var & ~GFS_MONOTONIC,
881c0a68be4Smrg 			    icv->run_sched_chunk_size, flags);
882c0a68be4Smrg   fn (data);
883c0a68be4Smrg   GOMP_parallel_end ();
884c0a68be4Smrg }
885c0a68be4Smrg 
8861debfc3dSmrg #ifdef HAVE_ATTRIBUTE_ALIAS
8871debfc3dSmrg extern __typeof(GOMP_parallel_loop_dynamic) GOMP_parallel_loop_nonmonotonic_dynamic
8881debfc3dSmrg 	__attribute__((alias ("GOMP_parallel_loop_dynamic")));
8891debfc3dSmrg extern __typeof(GOMP_parallel_loop_guided) GOMP_parallel_loop_nonmonotonic_guided
8901debfc3dSmrg 	__attribute__((alias ("GOMP_parallel_loop_guided")));
891c0a68be4Smrg extern __typeof(GOMP_parallel_loop_runtime) GOMP_parallel_loop_nonmonotonic_runtime
892c0a68be4Smrg 	__attribute__((alias ("GOMP_parallel_loop_runtime")));
893c0a68be4Smrg extern __typeof(GOMP_parallel_loop_runtime) GOMP_parallel_loop_maybe_nonmonotonic_runtime
894c0a68be4Smrg 	__attribute__((alias ("GOMP_parallel_loop_runtime")));
8951debfc3dSmrg #else
/* Fallback when attribute aliases are unavailable: nonmonotonic dynamic
   is implemented identically to plain dynamic.  */
void
GOMP_parallel_loop_nonmonotonic_dynamic (void (*fn) (void *), void *data,
					 unsigned num_threads, long start,
					 long end, long incr, long chunk_size,
					 unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    GFS_DYNAMIC, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}
9071debfc3dSmrg 
/* Fallback when attribute aliases are unavailable: nonmonotonic guided
   is implemented identically to plain guided.  */
void
GOMP_parallel_loop_nonmonotonic_guided (void (*fn) (void *), void *data,
					unsigned num_threads, long start,
					long end, long incr, long chunk_size,
					unsigned flags)
{
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    GFS_GUIDED, chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}
9191debfc3dSmrg 
/* Fallback when attribute aliases are unavailable: same body as
   GOMP_parallel_loop_runtime — schedule from the run-sched ICV with the
   GFS_MONOTONIC bit masked off.  */
void
GOMP_parallel_loop_nonmonotonic_runtime (void (*fn) (void *), void *data,
					 unsigned num_threads, long start,
					 long end, long incr, unsigned flags)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    icv->run_sched_var & ~GFS_MONOTONIC,
			    icv->run_sched_chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}
9321debfc3dSmrg 
/* Fallback when attribute aliases are unavailable: same body as
   GOMP_parallel_loop_runtime — schedule from the run-sched ICV with the
   GFS_MONOTONIC bit masked off.  */
void
GOMP_parallel_loop_maybe_nonmonotonic_runtime (void (*fn) (void *), void *data,
					       unsigned num_threads, long start,
					       long end, long incr,
					       unsigned flags)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  gomp_parallel_loop_start (fn, data, num_threads, start, end, incr,
			    icv->run_sched_var & ~GFS_MONOTONIC,
			    icv->run_sched_chunk_size, flags);
  fn (data);
  GOMP_parallel_end ();
}
946c0a68be4Smrg #endif
947c0a68be4Smrg 
9481debfc3dSmrg /* The GOMP_loop_end* routines are called after the thread is told that
9491debfc3dSmrg    all loop iterations are complete.  The first two versions synchronize
9501debfc3dSmrg    all threads; the nowait version does not.  */
9511debfc3dSmrg 
/* End-of-loop with a barrier: release the work share and wait for the
   whole team.  */
void
GOMP_loop_end (void)
{
  gomp_work_share_end ();
}
9571debfc3dSmrg 
/* End-of-loop for cancellable loops: returns the cancellation status
   reported by gomp_work_share_end_cancel.  */
bool
GOMP_loop_end_cancel (void)
{
  return gomp_work_share_end_cancel ();
}
9631debfc3dSmrg 
/* End-of-loop without a barrier (nowait clause): release the work share
   and continue immediately.  */
void
GOMP_loop_end_nowait (void)
{
  gomp_work_share_end_nowait ();
}
9691debfc3dSmrg 
9701debfc3dSmrg 
9711debfc3dSmrg /* We use static functions above so that we're sure that the "runtime"
9721debfc3dSmrg    function can defer to the proper routine without interposition.  We
9731debfc3dSmrg    export the static function with a strong alias when possible, or with
9741debfc3dSmrg    a wrapper function otherwise.  */
9751debfc3dSmrg 
9761debfc3dSmrg #ifdef HAVE_ATTRIBUTE_ALIAS
9771debfc3dSmrg extern __typeof(gomp_loop_static_start) GOMP_loop_static_start
9781debfc3dSmrg 	__attribute__((alias ("gomp_loop_static_start")));
9791debfc3dSmrg extern __typeof(gomp_loop_dynamic_start) GOMP_loop_dynamic_start
9801debfc3dSmrg 	__attribute__((alias ("gomp_loop_dynamic_start")));
9811debfc3dSmrg extern __typeof(gomp_loop_guided_start) GOMP_loop_guided_start
9821debfc3dSmrg 	__attribute__((alias ("gomp_loop_guided_start")));
9831debfc3dSmrg extern __typeof(gomp_loop_dynamic_start) GOMP_loop_nonmonotonic_dynamic_start
9841debfc3dSmrg 	__attribute__((alias ("gomp_loop_dynamic_start")));
9851debfc3dSmrg extern __typeof(gomp_loop_guided_start) GOMP_loop_nonmonotonic_guided_start
9861debfc3dSmrg 	__attribute__((alias ("gomp_loop_guided_start")));
987c0a68be4Smrg extern __typeof(GOMP_loop_runtime_start) GOMP_loop_nonmonotonic_runtime_start
988c0a68be4Smrg 	__attribute__((alias ("GOMP_loop_runtime_start")));
989c0a68be4Smrg extern __typeof(GOMP_loop_runtime_start) GOMP_loop_maybe_nonmonotonic_runtime_start
990c0a68be4Smrg 	__attribute__((alias ("GOMP_loop_runtime_start")));
9911debfc3dSmrg 
9921debfc3dSmrg extern __typeof(gomp_loop_ordered_static_start) GOMP_loop_ordered_static_start
9931debfc3dSmrg 	__attribute__((alias ("gomp_loop_ordered_static_start")));
9941debfc3dSmrg extern __typeof(gomp_loop_ordered_dynamic_start) GOMP_loop_ordered_dynamic_start
9951debfc3dSmrg 	__attribute__((alias ("gomp_loop_ordered_dynamic_start")));
9961debfc3dSmrg extern __typeof(gomp_loop_ordered_guided_start) GOMP_loop_ordered_guided_start
9971debfc3dSmrg 	__attribute__((alias ("gomp_loop_ordered_guided_start")));
9981debfc3dSmrg 
9991debfc3dSmrg extern __typeof(gomp_loop_doacross_static_start) GOMP_loop_doacross_static_start
10001debfc3dSmrg 	__attribute__((alias ("gomp_loop_doacross_static_start")));
10011debfc3dSmrg extern __typeof(gomp_loop_doacross_dynamic_start) GOMP_loop_doacross_dynamic_start
10021debfc3dSmrg 	__attribute__((alias ("gomp_loop_doacross_dynamic_start")));
10031debfc3dSmrg extern __typeof(gomp_loop_doacross_guided_start) GOMP_loop_doacross_guided_start
10041debfc3dSmrg 	__attribute__((alias ("gomp_loop_doacross_guided_start")));
10051debfc3dSmrg 
10061debfc3dSmrg extern __typeof(gomp_loop_static_next) GOMP_loop_static_next
10071debfc3dSmrg 	__attribute__((alias ("gomp_loop_static_next")));
10081debfc3dSmrg extern __typeof(gomp_loop_dynamic_next) GOMP_loop_dynamic_next
10091debfc3dSmrg 	__attribute__((alias ("gomp_loop_dynamic_next")));
10101debfc3dSmrg extern __typeof(gomp_loop_guided_next) GOMP_loop_guided_next
10111debfc3dSmrg 	__attribute__((alias ("gomp_loop_guided_next")));
10121debfc3dSmrg extern __typeof(gomp_loop_dynamic_next) GOMP_loop_nonmonotonic_dynamic_next
10131debfc3dSmrg 	__attribute__((alias ("gomp_loop_dynamic_next")));
10141debfc3dSmrg extern __typeof(gomp_loop_guided_next) GOMP_loop_nonmonotonic_guided_next
10151debfc3dSmrg 	__attribute__((alias ("gomp_loop_guided_next")));
1016c0a68be4Smrg extern __typeof(GOMP_loop_runtime_next) GOMP_loop_nonmonotonic_runtime_next
1017c0a68be4Smrg 	__attribute__((alias ("GOMP_loop_runtime_next")));
1018c0a68be4Smrg extern __typeof(GOMP_loop_runtime_next) GOMP_loop_maybe_nonmonotonic_runtime_next
1019c0a68be4Smrg 	__attribute__((alias ("GOMP_loop_runtime_next")));
10201debfc3dSmrg 
10211debfc3dSmrg extern __typeof(gomp_loop_ordered_static_next) GOMP_loop_ordered_static_next
10221debfc3dSmrg 	__attribute__((alias ("gomp_loop_ordered_static_next")));
10231debfc3dSmrg extern __typeof(gomp_loop_ordered_dynamic_next) GOMP_loop_ordered_dynamic_next
10241debfc3dSmrg 	__attribute__((alias ("gomp_loop_ordered_dynamic_next")));
10251debfc3dSmrg extern __typeof(gomp_loop_ordered_guided_next) GOMP_loop_ordered_guided_next
10261debfc3dSmrg 	__attribute__((alias ("gomp_loop_ordered_guided_next")));
10271debfc3dSmrg #else
/* Exported wrapper used when strong aliases are unavailable.  */
bool
GOMP_loop_static_start (long start, long end, long incr, long chunk_size,
			long *istart, long *iend)
{
  return gomp_loop_static_start (start, end, incr, chunk_size, istart, iend);
}
10341debfc3dSmrg 
/* Exported wrapper used when strong aliases are unavailable.  */
bool
GOMP_loop_dynamic_start (long start, long end, long incr, long chunk_size,
			 long *istart, long *iend)
{
  return gomp_loop_dynamic_start (start, end, incr, chunk_size, istart, iend);
}
10411debfc3dSmrg 
/* Exported wrapper used when strong aliases are unavailable.  */
bool
GOMP_loop_guided_start (long start, long end, long incr, long chunk_size,
			long *istart, long *iend)
{
  return gomp_loop_guided_start (start, end, incr, chunk_size, istart, iend);
}
10481debfc3dSmrg 
/* Alias fallback: nonmonotonic dynamic start is plain dynamic start.  */
bool
GOMP_loop_nonmonotonic_dynamic_start (long start, long end, long incr,
				      long chunk_size, long *istart,
				      long *iend)
{
  return gomp_loop_dynamic_start (start, end, incr, chunk_size, istart, iend);
}
10561debfc3dSmrg 
/* Alias fallback: nonmonotonic guided start is plain guided start.  */
bool
GOMP_loop_nonmonotonic_guided_start (long start, long end, long incr,
				     long chunk_size, long *istart, long *iend)
{
  return gomp_loop_guided_start (start, end, incr, chunk_size, istart, iend);
}
10631debfc3dSmrg 
/* Alias fallback: nonmonotonic runtime start is plain runtime start.  */
bool
GOMP_loop_nonmonotonic_runtime_start (long start, long end, long incr,
				      long *istart, long *iend)
{
  return GOMP_loop_runtime_start (start, end, incr, istart, iend);
}
1070c0a68be4Smrg 
/* Alias fallback: maybe-nonmonotonic runtime start is plain runtime
   start.  */
bool
GOMP_loop_maybe_nonmonotonic_runtime_start (long start, long end, long incr,
					    long *istart, long *iend)
{
  return GOMP_loop_runtime_start (start, end, incr, istart, iend);
}
1077c0a68be4Smrg 
/* Exported wrapper used when strong aliases are unavailable.  */
bool
GOMP_loop_ordered_static_start (long start, long end, long incr,
				long chunk_size, long *istart, long *iend)
{
  return gomp_loop_ordered_static_start (start, end, incr, chunk_size,
					 istart, iend);
}
10851debfc3dSmrg 
/* Exported wrapper used when strong aliases are unavailable.  */
bool
GOMP_loop_ordered_dynamic_start (long start, long end, long incr,
				 long chunk_size, long *istart, long *iend)
{
  return gomp_loop_ordered_dynamic_start (start, end, incr, chunk_size,
					  istart, iend);
}
10931debfc3dSmrg 
/* Exported wrapper used when strong aliases are unavailable.  */
bool
GOMP_loop_ordered_guided_start (long start, long end, long incr,
				long chunk_size, long *istart, long *iend)
{
  return gomp_loop_ordered_guided_start (start, end, incr, chunk_size,
					 istart, iend);
}
11011debfc3dSmrg 
/* Exported wrapper used when strong aliases are unavailable.  */
bool
GOMP_loop_doacross_static_start (unsigned ncounts, long *counts,
				 long chunk_size, long *istart, long *iend)
{
  return gomp_loop_doacross_static_start (ncounts, counts, chunk_size,
					  istart, iend);
}
11091debfc3dSmrg 
/* Exported wrapper used when strong aliases are unavailable.  */
bool
GOMP_loop_doacross_dynamic_start (unsigned ncounts, long *counts,
				  long chunk_size, long *istart, long *iend)
{
  return gomp_loop_doacross_dynamic_start (ncounts, counts, chunk_size,
					   istart, iend);
}
11171debfc3dSmrg 
/* Exported wrapper used when strong aliases are unavailable.  */
bool
GOMP_loop_doacross_guided_start (unsigned ncounts, long *counts,
				 long chunk_size, long *istart, long *iend)
{
  return gomp_loop_doacross_guided_start (ncounts, counts, chunk_size,
					  istart, iend);
}
11251debfc3dSmrg 
/* Exported wrapper used when strong aliases are unavailable.  */
bool
GOMP_loop_static_next (long *istart, long *iend)
{
  return gomp_loop_static_next (istart, iend);
}
11311debfc3dSmrg 
/* Exported wrapper used when strong aliases are unavailable.  */
bool
GOMP_loop_dynamic_next (long *istart, long *iend)
{
  return gomp_loop_dynamic_next (istart, iend);
}
11371debfc3dSmrg 
/* Exported wrapper used when strong aliases are unavailable.  */
bool
GOMP_loop_guided_next (long *istart, long *iend)
{
  return gomp_loop_guided_next (istart, iend);
}
11431debfc3dSmrg 
/* Alias fallback: nonmonotonic dynamic next is plain dynamic next.  */
bool
GOMP_loop_nonmonotonic_dynamic_next (long *istart, long *iend)
{
  return gomp_loop_dynamic_next (istart, iend);
}
11491debfc3dSmrg 
/* Alias fallback: nonmonotonic guided next is plain guided next.  */
bool
GOMP_loop_nonmonotonic_guided_next (long *istart, long *iend)
{
  return gomp_loop_guided_next (istart, iend);
}
11551debfc3dSmrg 
/* Alias fallback: nonmonotonic runtime next is plain runtime next.  */
bool
GOMP_loop_nonmonotonic_runtime_next (long *istart, long *iend)
{
  return GOMP_loop_runtime_next (istart, iend);
}
1161c0a68be4Smrg 
/* Alias fallback: maybe-nonmonotonic runtime next is plain runtime
   next.  */
bool
GOMP_loop_maybe_nonmonotonic_runtime_next (long *istart, long *iend)
{
  return GOMP_loop_runtime_next (istart, iend);
}
1167c0a68be4Smrg 
/* Exported wrapper used when strong aliases are unavailable.  */
bool
GOMP_loop_ordered_static_next (long *istart, long *iend)
{
  return gomp_loop_ordered_static_next (istart, iend);
}
11731debfc3dSmrg 
/* Exported wrapper used when strong aliases are unavailable.  */
bool
GOMP_loop_ordered_dynamic_next (long *istart, long *iend)
{
  return gomp_loop_ordered_dynamic_next (istart, iend);
}
11791debfc3dSmrg 
/* Exported wrapper used when strong aliases are unavailable.  */
bool
GOMP_loop_ordered_guided_next (long *istart, long *iend)
{
  return gomp_loop_ordered_guided_next (istart, iend);
}
11851debfc3dSmrg #endif
1186