Lines Matching defs:chunk

174 // type of schedule, chunk.  The loop description is found in lb (lower bound),
191 typename traits_t<T>::signed_t chunk,
210 "schedule:%%d chunk:%%%s nproc:%%%s tid:%%%s\n",
214 KD_TRACE(10, (buff, gtid, pr, lb, ub, st, schedule, chunk, nproc, tid));
279 // Use the chunk size specified by OMP_SCHEDULE (or default if not
281 chunk = team->t.t_sched.chunk;
284 *cur_chunk = chunk;
291 "schedule:%%d chunk:%%%s\n",
293 KD_TRACE(10, (buff, gtid, schedule, chunk));
301 if (chunk <= 0) {
302 chunk = KMP_DEFAULT_CHUNK;
315 "schedule:%%d chunk:%%%s\n",
317 KD_TRACE(10, (buff, gtid, schedule, chunk));
335 // compiler provides simd_width in the chunk parameter
348 chunk = team->t.t_sched.chunk * chunk;
352 *cur_chunk = chunk;
360 " chunk:%%%s\n",
362 KD_TRACE(10, (buff, gtid, schedule, chunk));
367 pr->u.p.parm1 = chunk;
439 ntc = (tc % chunk ? 1 : 0) + tc / chunk;
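
The matched line is the ceiling division that turns the trip count into a chunk count: one chunk per full chunk-size block, plus one more if there is a remainder. A minimal standalone sketch with illustrative values (trip_count, chunk_size, num_chunks are names local to this example, not the runtime's):

  #include <cstdio>

  int main() {
    unsigned long long trip_count = 1000; // tc: total loop iterations
    unsigned long long chunk_size = 7;    // chunk: iterations per chunk
    // Same ceiling division as line 439: add one chunk when tc is not an
    // exact multiple of chunk.
    unsigned long long num_chunks =
        (trip_count % chunk_size ? 1 : 0) + trip_count / chunk_size;
    std::printf("%llu iterations in chunks of %llu -> %llu chunks\n",
                trip_count, chunk_size, num_chunks); // prints 143
    return 0;
  }
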
625 // Calculate chunk for metadata report
652 // similar to balanced, but chunk adjusted to multiple of simd width
659 pr->u.p.parm1 = ((tc + nth - 1) / nth + chunk - 1) & ~(chunk - 1);
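
Line 659 sizes the per-thread block for the SIMD-balanced case: ceil(tc / nth), rounded up to a multiple of chunk, which here carries the SIMD width (see the comment matched at line 335). The `& ~(chunk - 1)` form only works when that width is a power of two. A hedged sketch with illustrative values:

  #include <cstdio>

  int main() {
    unsigned long long tc = 1000; // trip count
    unsigned long long nth = 6;   // threads in the team
    unsigned long long simd = 8;  // SIMD width passed in via chunk; power of two
    // ceil(tc / nth), then round up to the next multiple of simd.
    unsigned long long per_thread =
        ((tc + nth - 1) / nth + simd - 1) & ~(simd - 1);
    std::printf("ceil(%llu/%llu) = %llu, rounded to a multiple of %llu -> %llu\n",
                tc, nth, (tc + nth - 1) / nth, simd, per_thread); // 167 -> 168
    return 0;
  }
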
673 if ((2L * chunk + 1) * nproc >= tc) {
674 /* chunk size too large, switch to dynamic */
679 pr->u.p.parm2 = guided_int_param * nproc * (chunk + 1);
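
Lines 673-679 (and the matching test at 703) guard the guided schedules: if (2*chunk + 1) iterations per thread already covers the whole loop, there is nothing left to taper, so the runtime falls back to dynamic; otherwise parm2 becomes the K*nproc*(chunk+1) comparison value (K = guided_int_param, 2 by default per the comment matched at line 1814). A standalone sketch of that threshold with illustrative numbers:

  #include <cstdio>

  int main() {
    long long chunk = 100; // requested chunk size
    long long nproc = 8;   // threads in the team
    long long tc = 1200;   // trip count
    // Same test as lines 673/703: the guided lower bound of (2*chunk + 1)
    // iterations per thread already exceeds the loop, so tapering is pointless.
    if ((2LL * chunk + 1) * nproc >= tc)
      std::printf("chunk too large for guided -> switch to dynamic_chunked\n");
    else
      std::printf("keep guided scheduling\n");
    return 0;
  }
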
703 if ((2L * chunk + 1) * nproc >= tc) {
704 /* chunk size too large, switch to dynamic */
726 long double target = ((long double)chunk * 2 + 1) * nproc / tc;
728 /* crossover point--chunk indexes equal to or greater than
757 <= chunk */
807 cross * chunk;
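
Lines 726-807 are fragments of the guided_analytical setup: target is the fraction of the loop at which the analytically shrinking chunk first falls to the requested chunk size, cross is the chunk index where the schedule switches to fixed chunks, and cross * chunk is the iteration offset of that switch. A sketch of the crossover, assuming the shrink factor x = 1 - 1/(2*nproc) used in the same routine and solving x^cross <= target with logarithms instead of the runtime's integer search; all values are illustrative:

  #include <cmath>
  #include <cstdio>

  int main() {
    double chunk = 4;    // user-requested minimum chunk
    double nproc = 8;    // threads in the team
    double tc = 100000;  // trip count
    // target, as on line 726.
    long double target = ((long double)chunk * 2 + 1) * nproc / tc;
    // Each guided chunk shrinks by roughly x per chunk index, so the
    // crossover solves x^cross <= target.
    long double x = 1.0L - 0.5L / nproc;
    unsigned long long cross = (unsigned long long)std::ceil(
        std::log((double)target) / std::log((double)x));
    std::printf("switch to fixed %g-iteration chunks at chunk index %llu\n",
                chunk, cross);
    return 0;
  }
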
840 // bounds calculations in the get next chunk routine.
854 parm1 = chunk;
964 typename traits_t<T>::signed_t chunk, int push_ws) {
994 "chunk:%%%s lb:%%%s ub:%%%s st:%%%s\n",
997 KD_TRACE(10, (buff, gtid, schedule, chunk, lb, ub, st));
1048 kmp_uint64 cur_chunk = chunk;
1089 chunk, (T)th->th.th_team_nproc,
1177 * every chunk of iterations. If the ordered section(s) were not executed
1178 * for this iteration (or every iteration in this chunk), we need to set the
1370 T chunk = pr->u.p.parm1;
1386 // try to get own chunk of iterations
1477 // steal 1 chunk of 1..7 remaining
1497 // now update own count and ub with stolen range excluding init chunk
1522 vnew.p.count++; // get chunk from head of self range
1616 vnew.p.ub -= 1; // steal 1 chunk of 1..7 remaining
1618 KMP_DEBUG_ASSERT(vnew.p.ub * (UT)chunk <= trip);
1665 init *= chunk;
1666 limit = chunk + init - 1;
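
The static_steal path above tracks ownership in whole chunks, so a claimed (or stolen) chunk index is converted back to an inclusive iteration range only at the end, as on lines 1665-1666. A self-contained sketch with illustrative values:

  #include <cstdio>

  int main() {
    unsigned long long chunk = 16;      // iterations per chunk
    unsigned long long chunk_index = 5; // chunk this thread just claimed
    unsigned long long init = chunk_index * chunk; // first iteration: 80
    unsigned long long limit = chunk + init - 1;   // last iteration: 95
    std::printf("chunk %llu covers iterations [%llu, %llu]\n",
                chunk_index, init, limit);
    return 0;
  }
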
1814 pr->u.p.parm2) { // compare with K*nproc*(chunk+1), K=2 by default
1828 last = true; // the last chunk
1838 // CAS was successful, chunk obtained
1865 // same as iterative but curr-chunk adjusted to be multiple of given
1866 // chunk
1867 T chunk = pr->u.p.parm1;
1881 KMP_DEBUG_ASSERT(chunk && init % chunk == 0);
1882 // compare with K*nproc*(chunk+1), K=2 by default
1887 (ST)chunk);
1894 if ((T)remaining > chunk) {
1895 limit = init + chunk - 1;
1897 last = true; // the last chunk
1907 UT rem = span % chunk;
1908 if (rem) // adjust so that span%chunk == 0
1909 span += chunk - rem;
1913 // CAS was successful, chunk obtained
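
For guided_simd (lines 1865-1913) the span handed out is rounded up to a multiple of chunk with a modulo adjustment, which, unlike the power-of-two mask on line 659, works for any chunk value. A small sketch with illustrative values:

  #include <cstdio>

  int main() {
    unsigned long long chunk = 12; // SIMD-width-derived chunk
    unsigned long long span = 50;  // iterations the guided formula wants to hand out
    unsigned long long rem = span % chunk;
    if (rem) // adjust so that span % chunk == 0, as on lines 1907-1909
      span += chunk - rem;
    std::printf("span rounded up to %llu, a multiple of %llu\n", span, chunk);
    return 0;
  }
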
1958 chunk sizes */
2133 ompt_dispatch_chunk_t chunk; \
2135 OMPT_GET_DISPATCH_CHUNK(chunk, lb, ub, st); \
2136 instance.ptr = &chunk; \
2232 T chunk = pr->u.p.parm1;
2237 init = chunk * pr->u.p.count++;
2254 limit = chunk + init - 1;
2744 UT chunk = trip_count / nteams;
2747 incr * (team_id * chunk + (team_id < extras ? team_id : extras));
2748 *pupper = *plower + chunk * incr - (team_id < extras ? 0 : incr);
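
Lines 2744-2748 block the trip count across teams for distribute-style bounds: every team gets trip_count / nteams iterations and the first trip_count % nteams teams get one extra. A standalone sketch that prints each team's inclusive bounds; the loop bounds and names are illustrative:

  #include <cstdio>

  int main() {
    unsigned long long trip_count = 10;
    unsigned long long nteams = 4;
    long long incr = 1;   // loop increment
    long long lower0 = 0; // original lower bound
    unsigned long long chunk = trip_count / nteams;  // 2
    unsigned long long extras = trip_count % nteams; // 2
    for (unsigned long long team_id = 0; team_id < nteams; ++team_id) {
      // Same arithmetic as lines 2747-2748.
      long long plower = lower0 + incr * (long long)(team_id * chunk +
                             (team_id < extras ? team_id : extras));
      long long pupper = plower + (long long)chunk * incr -
                         (team_id < extras ? 0 : incr);
      std::printf("team %llu: [%lld, %lld]\n", team_id, plower, pupper);
    }
    return 0; // teams get [0,2] [3,5] [6,7] [8,9]
  }
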
2783 // T lb, T ub, ST st, ST chunk )
2795 @param chunk The chunk size to block with
2804 kmp_int32 ub, kmp_int32 st, kmp_int32 chunk) {
2809 __kmp_dispatch_init<kmp_int32>(loc, gtid, schedule, lb, ub, st, chunk, true);
2816 kmp_uint32 ub, kmp_int32 st, kmp_int32 chunk) {
2821 __kmp_dispatch_init<kmp_uint32>(loc, gtid, schedule, lb, ub, st, chunk, true);
2829 kmp_int64 ub, kmp_int64 st, kmp_int64 chunk) {
2834 __kmp_dispatch_init<kmp_int64>(loc, gtid, schedule, lb, ub, st, chunk, true);
2842 kmp_uint64 ub, kmp_int64 st, kmp_int64 chunk) {
2847 __kmp_dispatch_init<kmp_uint64>(loc, gtid, schedule, lb, ub, st, chunk, true);
2862 kmp_int32 chunk) {
2868 __kmp_dispatch_init<kmp_int32>(loc, gtid, schedule, lb, ub, st, chunk, true);
2874 kmp_int32 chunk) {
2880 __kmp_dispatch_init<kmp_uint32>(loc, gtid, schedule, lb, ub, st, chunk, true);
2886 kmp_int64 chunk) {
2892 __kmp_dispatch_init<kmp_int64>(loc, gtid, schedule, lb, ub, st, chunk, true);
2898 kmp_int64 chunk) {
2904 __kmp_dispatch_init<kmp_uint64>(loc, gtid, schedule, lb, ub, st, chunk, true);
2910 @param p_last Pointer to a flag set to one if this is the last chunk or zero
2912 @param p_lb Pointer to the lower bound for the next chunk of work
2913 @param p_ub Pointer to the upper bound for the next chunk of work
2914 @param p_st Pointer to the stride for the next chunk of work
2917 Get the next dynamically allocated chunk of work for this thread.
3102 kmp_int32 ub, kmp_int32 st, kmp_int32 chunk,
3104 __kmp_dispatch_init<kmp_int32>(loc, gtid, schedule, lb, ub, st, chunk,
3110 kmp_uint32 ub, kmp_int32 st, kmp_int32 chunk,
3112 __kmp_dispatch_init<kmp_uint32>(loc, gtid, schedule, lb, ub, st, chunk,
3118 kmp_int64 ub, kmp_int64 st, kmp_int64 chunk,
3120 __kmp_dispatch_init<kmp_int64>(loc, gtid, schedule, lb, ub, st, chunk,
3126 kmp_uint64 ub, kmp_int64 st, kmp_int64 chunk,
3128 __kmp_dispatch_init<kmp_uint64>(loc, gtid, schedule, lb, ub, st, chunk,