Lines Matching defs:team (each entry below is prefixed with its line number in the source file)

210 void __kmp_dist_barrier_wakeup(enum barrier_type bt, kmp_team_t *team,
217 kmp_info_t **other_threads = team->t.t_threads;
230 kmp_team_t *team;
237 team = this_thr->th.th_team;
239 other_threads = team->t.t_threads;
240 b = team->t.b;
247 gtid, team->t.t_id, tid, bt));
372 gtid, team->t.t_id, tid, bt));
379 kmp_team_t *team;
392 // workers and non-master group leaders need to check their presence in team
396 // Thread is not in use in a team. Wait on location in tid's thread
399 // state to get to 1 which is waiting on go and being in the team
432 // At this point, the thread thinks it is in use in a team, or in
433 // transition to be used in a team, but it might have reached this barrier
434 // before it was marked unused by the team. Unused threads are awoken and
436 // this point by being picked up for use by a different team. Either way,
439 team = this_thr->th.th_team;
441 KMP_DEBUG_ASSERT(team);
442 b = team->t.b;
452 // Wait on go flag on team
466 // use in the team, as it could have been woken up for the purpose of
467 // changing team size, or reaping threads at shutdown.
488 __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[tid], team,
490 copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
491 (kmp_internal_control_t *)team->t.b->team_icvs);
493 &team->t.t_implicit_task_taskdata[tid].td_icvs);
503 __kmp_dist_barrier_wakeup(bt, team, tid + 1, group_end, 1, tid);
506 team = this_thr->th.th_team;
507 b = team->t.b;
514 &team->t.t_implicit_task_taskdata[tid].td_icvs);
525 __kmp_dist_barrier_wakeup(bt, team, tid + b->threads_per_group, nproc,
543 __kmp_dist_barrier_wakeup(bt, team, tid + 1, group_end, 1, tid);
552 gtid, team->t.t_id, tid, bt));
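The distributed barrier lines above split the team into fixed-size groups: plain workers report to a group leader, group leaders report to thread 0, and the wakeup routine walks a (start, stop, inc) range of tids to release them (see the calls at 503, 525, 543). The following is a minimal portable sketch of that shape only, with invented names (grouped_barrier, kGroupSize) and a team size that divides evenly into groups; it is not the libomp implementation.

#include <algorithm>
#include <array>
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

constexpr int kThreads = 8;
constexpr int kGroupSize = 4;

std::array<std::atomic<int>, kThreads> arrived;  // per-thread arrival generation
std::array<std::atomic<int>, kThreads> released; // per-thread release generation

void grouped_barrier(int tid, int gen) {
  const int leader = (tid / kGroupSize) * kGroupSize; // first tid of my group
  const int stop = std::min(leader + kGroupSize, kThreads);
  if (tid != leader) { // plain worker: report to my group leader, wait for go
    arrived[tid].store(gen, std::memory_order_release);
    while (released[tid].load(std::memory_order_acquire) < gen)
      std::this_thread::yield();
    return;
  }
  // Group leader: gather my own group first.
  for (int t = leader + 1; t < stop; ++t)
    while (arrived[t].load(std::memory_order_acquire) < gen)
      std::this_thread::yield();
  if (tid == 0) {
    // Global leader: gather the other group leaders, then start the release.
    for (int l = kGroupSize; l < kThreads; l += kGroupSize)
      while (arrived[l].load(std::memory_order_acquire) < gen)
        std::this_thread::yield();
    for (int l = kGroupSize; l < kThreads; l += kGroupSize)
      released[l].store(gen, std::memory_order_release);
  } else { // other group leaders report upward, then wait for their own go
    arrived[tid].store(gen, std::memory_order_release);
    while (released[tid].load(std::memory_order_acquire) < gen)
      std::this_thread::yield();
  }
  // Ranged wakeup of my group, analogous to walking [start, stop) with a stride.
  for (int t = leader + 1; t < stop; ++t)
    released[t].store(gen, std::memory_order_release);
}

int main() {
  for (int t = 0; t < kThreads; ++t) { arrived[t] = 0; released[t] = 0; }
  std::vector<std::thread> pool;
  for (int tid = 0; tid < kThreads; ++tid)
    pool.emplace_back([tid] {
      for (int gen = 1; gen <= 3; ++gen)
        grouped_barrier(tid, gen);
    });
  for (auto &t : pool) t.join();
  std::printf("all %d threads passed 3 barrier generations\n", kThreads);
}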
561 kmp_team_t *team = this_thr->th.th_team;
563 kmp_info_t **other_threads = team->t.t_threads;
568 gtid, team->t.t_id, tid, bt));
584 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(0, team),
585 team->t.t_id, 0, &thr_bar->b_arrived, thr_bar->b_arrived,
588 /* After performing this write, a worker thread may not assume that the team
594 kmp_balign_team_t *team_bar = &team->t.t_bar[bt];
597 // Don't have to worry about sleep bit here or atomic since team setting
600 // Collect all the worker team member threads.
609 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(i, team),
610 team->t.t_id, i,
635 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(i, team),
636 team->t.t_id, i));
644 // Don't have to worry about sleep bit here or atomic since team setting
646 KA_TRACE(20, ("__kmp_linear_barrier_gather: T#%d(%d:%d) set team %d "
648 gtid, team->t.t_id, tid, team->t.t_id, &team_bar->b_arrived,
654 gtid, team->t.t_id, tid, bt));
664 kmp_team_t *team;
671 team = __kmp_threads[gtid]->th.th_team;
672 KMP_DEBUG_ASSERT(team != NULL);
673 other_threads = team->t.t_threads;
677 gtid, team->t.t_id, tid, bt));
684 ngo_load(&team->t.t_implicit_task_taskdata[0].td_icvs);
686 __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[i],
687 team, i, FALSE);
688 ngo_store_icvs(&team->t.t_implicit_task_taskdata[i].td_icvs,
689 &team->t.t_implicit_task_taskdata[0].td_icvs);
707 gtid, team->t.t_id, tid, other_threads[i]->th.th_info.ds.ds_gtid,
708 team->t.t_id, i, &other_threads[i]->th.th_bar[bt].bb.b_go,
747 // The worker thread may now assume that the team is valid.
750 team = __kmp_threads[gtid]->th.th_team;
752 KMP_DEBUG_ASSERT(team != NULL);
756 gtid, team->t.t_id, tid, &thr_bar->b_go, KMP_INIT_BARRIER_STATE));
762 gtid, team->t.t_id, tid, bt));
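The comments captured above at 588 and 747 state a lifetime rule that the linear gather and release both depend on: once a worker publishes its arrived flag, the primary may reuse or free the team, so the worker must not touch team state again until it sees its go flag, and must then re-read its team pointer. Below is a small sketch of that discipline under invented names (Team, Worker, run_worker); the real flag types, ICV propagation, and team reuse in libomp are considerably more involved.

#include <atomic>
#include <cstdio>
#include <memory>
#include <thread>
#include <vector>

struct Team {
  int id;
  int nproc;
};

struct Worker {
  std::atomic<int> arrived{0};
  std::atomic<int> go{0};
  std::atomic<Team *> team{nullptr}; // (re)published by the primary before each go
};

constexpr int kWorkers = 3;
Worker workers[kWorkers];

void run_worker(int tid, int rounds) {
  for (int gen = 1; gen <= rounds; ++gen) {
    // Publish arrival; after this store the current Team may be reclaimed, so
    // keep no pointers into it across the wait.
    workers[tid].arrived.store(gen, std::memory_order_release);
    while (workers[tid].go.load(std::memory_order_acquire) < gen)
      std::this_thread::yield();
    // Only after seeing go is it safe to re-read the (possibly new) team.
    Team *team = workers[tid].team.load(std::memory_order_acquire);
    std::printf("worker %d joined team %d of %d threads\n", tid, team->id,
                team->nproc);
  }
}

int main() {
  constexpr int kRounds = 2;
  std::vector<std::thread> pool;
  for (int tid = 0; tid < kWorkers; ++tid)
    pool.emplace_back(run_worker, tid, kRounds);

  std::unique_ptr<Team> current;
  for (int gen = 1; gen <= kRounds; ++gen) {
    for (auto &w : workers) // gather: wait until every worker has arrived
      while (w.arrived.load(std::memory_order_acquire) < gen)
        std::this_thread::yield();
    // Everyone has arrived, so no worker still holds a pointer into the
    // previous team; this is where a runtime could free or resize it.
    current = std::make_unique<Team>(Team{gen, kWorkers + 1});
    for (auto &w : workers)
      w.team.store(current.get(), std::memory_order_release);
    for (auto &w : workers) // release: set each worker's go flag
      w.go.store(gen, std::memory_order_release);
  }
  for (auto &t : pool) t.join();
}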
799 kmp_team_t *team = this_thr->th.th_team;
801 kmp_info_t **other_threads = team->t.t_threads;
811 gtid, team->t.t_id, tid, bt));
826 new_state = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
840 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
841 team->t.t_id, child_tid, &child_bar->b_arrived, new_state));
856 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
857 team->t.t_id, child_tid));
875 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(parent_tid, team),
876 team->t.t_id, parent_tid, &thr_bar->b_arrived, thr_bar->b_arrived,
880 /* After performing this write, a worker thread may not assume that the team
886 // Need to update the team arrived pointer if we are the primary thread
888 team->t.t_bar[bt].b_arrived = new_state;
890 team->t.t_bar[bt].b_arrived += KMP_BARRIER_STATE_BUMP;
891 KA_TRACE(20, ("__kmp_tree_barrier_gather: T#%d(%d:%d) set team %d "
893 gtid, team->t.t_id, tid, team->t.t_id,
894 &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived));
898 gtid, team->t.t_id, tid, bt));
905 kmp_team_t *team;
915 tid)) { // Handle fork barrier workers who aren't part of a team yet
942 // The worker thread may now assume that the team is valid.
943 team = __kmp_threads[gtid]->th.th_team;
944 KMP_DEBUG_ASSERT(team != NULL);
950 team->t.t_id, tid, &thr_bar->b_go, KMP_INIT_BARRIER_STATE));
953 team = __kmp_threads[gtid]->th.th_team;
954 KMP_DEBUG_ASSERT(team != NULL);
957 gtid, team->t.t_id, tid, bt));
963 kmp_info_t **other_threads = team->t.t_threads;
980 __kmp_init_implicit_task(team->t.t_ident,
981 team->t.t_threads[child_tid], team,
983 copy_icvs(&team->t.t_implicit_task_taskdata[child_tid].td_icvs,
984 &team->t.t_implicit_task_taskdata[0].td_icvs);
991 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
992 team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
1003 gtid, team->t.t_id, tid, bt));
1011 kmp_team_t *team = this_thr->th.th_team;
1013 kmp_info_t **other_threads = team->t.t_threads;
1024 gtid, team->t.t_id, tid, bt));
1049 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(parent_tid, team),
1050 team->t.t_id, parent_tid, &thr_bar->b_arrived,
1055 loop), a worker thread may not assume that the team is valid any more
1064 new_state = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
1080 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
1081 team->t.t_id, child_tid, &child_bar->b_arrived, new_state));
1097 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
1098 team->t.t_id, child_tid));
1109 // Need to update the team arrived pointer if we are the primary thread
1111 team->t.t_bar[bt].b_arrived += KMP_BARRIER_STATE_BUMP;
1113 team->t.t_bar[bt].b_arrived = new_state;
1114 KA_TRACE(20, ("__kmp_hyper_barrier_gather: T#%d(%d:%d) set team %d "
1116 gtid, team->t.t_id, tid, team->t.t_id,
1117 &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived));
1121 gtid, team->t.t_id, tid, bt));
1130 kmp_team_t *team;
1146 team = __kmp_threads[gtid]->th.th_team;
1147 KMP_DEBUG_ASSERT(team != NULL);
1150 gtid, team->t.t_id, tid, bt));
1154 &team->t.t_implicit_task_taskdata[tid].td_icvs);
1157 } else { // Handle fork barrier workers who aren't part of a team yet
1183 // The worker thread may now assume that the team is valid.
1184 team = __kmp_threads[gtid]->th.th_team;
1185 KMP_DEBUG_ASSERT(team != NULL);
1191 gtid, team->t.t_id, tid, &thr_bar->b_go, KMP_INIT_BARRIER_STATE));
1195 other_threads = team->t.t_threads;
1257 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
1258 team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
1269 __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[tid], team, tid,
1271 copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
1278 gtid, team->t.t_id, tid, bt));
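The tree and hypercube gather routines listed above share one shape: a parent waits for each child's arrived flag and folds the child's contribution into its own slot before signalling upward, so the barrier and any reduction travel together. The sketch below shows only that shape with a binary tree and a sum reduction, releasing through a single flat go flag for brevity (the real release fans back out through the same topology). All names here (tree_gather, Slot, kThreads) are illustrative, not libomp code.

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

constexpr int kThreads = 8;

struct Slot {
  std::atomic<int> arrived{0}; // set once the subtree rooted here is finished
  long partial = 0;            // this thread's (plus its children's) contribution
};

Slot slots[kThreads];
std::atomic<int> go{0};

void tree_gather(int tid) {
  slots[tid].partial = tid + 1; // stand-in for this thread's local work
  // Wait for both children of a binary tree and fold their partials in.
  const int kids[2] = {2 * tid + 1, 2 * tid + 2};
  for (int child : kids) {
    if (child >= kThreads)
      continue;
    while (slots[child].arrived.load(std::memory_order_acquire) == 0)
      std::this_thread::yield();
    slots[tid].partial += slots[child].partial;
  }
  if (tid == 0) {
    std::printf("reduced total: %ld\n", slots[0].partial); // 1+2+...+8 == 36
    go.store(1, std::memory_order_release); // flat release, for brevity
  } else {
    slots[tid].arrived.store(1, std::memory_order_release); // tell the parent
    while (go.load(std::memory_order_acquire) == 0)
      std::this_thread::yield();
  }
}

int main() {
  std::vector<std::thread> pool;
  for (int tid = 0; tid < kThreads; ++tid)
    pool.emplace_back(tree_gather, tid);
  for (auto &t : pool) t.join();
}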
1285 Performs the minimum amount of initialization required based on how the team
1287 traditional wake-up mechanisms. For example, if the team size increases,
1288 threads already in the team will respond to on-core wakeup on their parent
1289 thread, but threads newly added to the team will only be listening on the
1294 int tid, kmp_team_t *team) {
1296 bool uninitialized = thr_bar->team == NULL;
1297 bool team_changed = team != thr_bar->team;
1334 thr_bar->team = team;
1336 &team->t.t_threads[thr_bar->parent_tid]->th.th_bar[bt].bb;
1339 thr_bar->team = team;
1341 &team->t.t_threads[thr_bar->parent_tid]->th.th_bar[bt].bb;
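The comment at 1285-1289 and the tests at 1296-1297 describe lazy re-initialization: the per-thread hierarchical barrier state caches the team it was last set up for, and on each entry only the pieces invalidated by what actually changed (nothing, the team size, or the whole team) are recomputed. A small single-threaded sketch of that idea, with invented names (ThreadBarrierState, setup_hierarchy) and a toy parent formula:

#include <cstdio>

struct Team {
  int id;
  int nproc;
};

struct ThreadBarrierState {
  const Team *team = nullptr; // team this state was last configured for
  int cached_nproc = 0;
  int parent_tid = -1;        // derived hierarchy placement
};

void setup_hierarchy(ThreadBarrierState &bar, const Team &team, int tid) {
  const bool uninitialized = bar.team == nullptr;
  const bool team_changed = bar.team != &team;
  const bool size_changed = bar.cached_nproc != team.nproc;

  if (uninitialized || team_changed || size_changed) {
    // Recompute placement only when something relevant changed; a thread that
    // re-enters with the same team and size skips all of this work.
    bar.parent_tid = (tid == 0) ? -1 : (tid - 1) / 2; // toy binary hierarchy
    bar.cached_nproc = team.nproc;
    bar.team = &team;
    std::printf("tid %d: reinitialized for team %d (nproc %d)\n", tid, team.id,
                team.nproc);
  } else {
    std::printf("tid %d: reusing cached setup for team %d\n", tid, team.id);
  }
}

int main() {
  Team a{1, 4}, b{2, 8};
  ThreadBarrierState bar;
  setup_hierarchy(bar, a, 3); // first use: full setup
  setup_hierarchy(bar, a, 3); // same team, same size: nothing to redo
  setup_hierarchy(bar, b, 3); // new team: recompute placement
}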
1362 kmp_team_t *team = this_thr->th.th_team;
1365 kmp_info_t **other_threads = team->t.t_threads;
1368 int level = team->t.t_level;
1380 gtid, team->t.t_id, tid, bt));
1391 team);
1396 (kmp_uint64)team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
1404 : team->t.t_bar[bt].b_arrived | thr_bar->leaf_state;
1407 gtid, team->t.t_id, tid));
1417 gtid, team->t.t_id, tid,
1418 __kmp_gtid_from_tid(child_tid, team), team->t.t_id,
1441 gtid, team->t.t_id, tid,
1442 __kmp_gtid_from_tid(child_tid, team), team->t.t_id,
1449 gtid, team->t.t_id, tid,
1450 __kmp_gtid_from_tid(child_tid, team), team->t.t_id,
1470 gtid, team->t.t_id, tid,
1471 __kmp_gtid_from_tid(child_tid, team), team->t.t_id,
1478 gtid, team->t.t_id, tid,
1479 __kmp_gtid_from_tid(child_tid, team), team->t.t_id,
1493 gtid, team->t.t_id, tid,
1494 __kmp_gtid_from_tid(thr_bar->parent_tid, team), team->t.t_id,
1498 not assume that the team is valid any more - it could be deallocated by
1508 thr_bar->b_arrived = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
1514 } else { // Primary thread needs to update the team's b_arrived value
1515 team->t.t_bar[bt].b_arrived = new_state;
1516 KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) set team %d "
1518 gtid, team->t.t_id, tid, team->t.t_id,
1519 &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived));
1521 // Is the team access below unsafe or just technically invalid?
1524 gtid, team->t.t_id, tid, bt));
1531 kmp_team_t *team;
1537 team = __kmp_threads[gtid]->th.th_team;
1538 KMP_DEBUG_ASSERT(team != NULL);
1541 gtid, team->t.t_id, tid, bt));
1546 thr_bar->team == NULL) {
1574 // The worker thread may now assume that the team is valid.
1575 team = __kmp_threads[gtid]->th.th_team;
1576 KMP_DEBUG_ASSERT(team != NULL);
1582 gtid, team->t.t_id, tid, &thr_bar->b_go, KMP_INIT_BARRIER_STATE));
1587 int level = team->t.t_level;
1588 if (team->t.t_threads[0]
1590 if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
1601 // If the team size has increased, we still communicate with old leaves via
1606 tid, team);
1607 // But if the entire team changes, we won't use oncore barrier at all
1613 __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[tid], team, tid,
1618 &team->t.t_implicit_task_taskdata[tid].td_icvs);
1624 copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
1632 copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
1656 &team->t.t_threads[child_tid]->th.th_bar[bt].bb;
1660 gtid, team->t.t_id, tid,
1661 __kmp_gtid_from_tid(child_tid, team), team->t.t_id,
1674 // We test team_change on the off-chance that the level 1 team changed.
1686 kmp_info_t *child_thr = team->t.t_threads[child_tid];
1692 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
1693 team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
1712 kmp_info_t *child_thr = team->t.t_threads[child_tid];
1716 gtid, team->t.t_id, tid,
1717 __kmp_gtid_from_tid(child_tid, team), team->t.t_id,
1729 copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
1735 gtid, team->t.t_id, tid, bt));
1775 kmp_team_t *team = this_thr->th.th_team;
1831 if (!team->t.t_serialized) {
1841 __kmp_tasking_barrier(team, this_thr, gtid);
1848 access it when the team struct is not guaranteed to exist. */
1854 team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
1856 team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
1858 this_thr->th.th_team_bt_intervals = KMP_BLOCKTIME_INTERVAL(team, tid);
1868 if (KMP_MASTER_TID(tid)) { // Primary thread counter stored in team struct
1869 team->t.t_bar[bt].b_master_arrived += 1;
1880 __kmp_task_team_setup(this_thr, team);
1923 __kmp_task_team_wait(this_thr, team USE_ITT_BUILD_ARG(itt_sync_obj));
1928 team->t.t_bar[bt].b_team_arrived += 1;
1932 kmp_int32 cancel_request = KMP_ATOMIC_LD_RLX(&team->t.t_cancel_request);
1936 KMP_ATOMIC_ST_RLX(&team->t.t_cancel_request, cancel_noreq);
1952 this_thr->th.th_teams_size.nteams == 1) && // or inside single team
1953 team->t.t_active_level == 1) {
1956 kmp_info_t **other_threads = team->t.t_threads;
2035 __kmp_task_team_sync(this_thr, team);
2062 __kmp_task_team_wait(this_thr, team USE_ITT_BUILD_ARG(itt_sync_obj));
2063 __kmp_task_team_setup(this_thr, team);
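The t_cancel_request lines at 1932-1936 show a cancellation handshake folded into the barrier: any thread may raise the request before arriving, the request is sampled and reset once per barrier, and every thread leaves knowing the verdict. A minimal sketch of that handshake using C++20 std::barrier for the synchronization itself; the flag names and the worker loop are made up for the demo.

#include <atomic>
#include <barrier>
#include <cstdio>
#include <thread>
#include <vector>

constexpr int kThreads = 4;
std::atomic<bool> cancel_request{false};  // raised by whichever thread wants out
std::atomic<bool> cancel_observed{false}; // verdict published once per phase

std::barrier sync_point(kThreads, []() noexcept {
  // Runs once per phase after all arrivals: latch the verdict and reset the
  // request so the next barrier starts clean, mirroring the reset above.
  cancel_observed.store(cancel_request.exchange(false, std::memory_order_relaxed),
                        std::memory_order_relaxed);
});

void worker(int tid) {
  for (int step = 0; step < 2; ++step) {
    if (step == 1 && tid == 2) // pretend a cancellation point fired here
      cancel_request.store(true, std::memory_order_relaxed);
    sync_point.arrive_and_wait();
    if (cancel_observed.load(std::memory_order_relaxed)) {
      std::printf("thread %d: cancelled at step %d, unwinding\n", tid, step);
      return; // every thread sees the same verdict, so all leave together
    }
    std::printf("thread %d: step %d completed\n", tid, step);
  }
}

int main() {
  std::vector<std::thread> pool;
  for (int tid = 0; tid < kThreads; ++tid)
    pool.emplace_back(worker, tid);
  for (auto &t : pool) t.join();
}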
2137 kmp_team_t *team = this_thr->th.th_team;
2139 if (!team->t.t_serialized) {
2170 __kmp_task_team_sync(this_thr, team);
2183 kmp_team_t *team;
2202 team = this_thr->th.th_team;
2203 KMP_DEBUG_ASSERT(nproc == team->t.t_nproc);
2206 team_id = team->t.t_id;
2208 if (master_thread != team->t.t_threads[0]) {
2212 KMP_DEBUG_ASSERT(master_thread == team->t.t_threads[0]);
2218 KMP_DEBUG_ASSERT(this_thr == team->t.t_threads[tid]);
2232 codeptr = team->t.ompt_team_info.master_return_address;
2257 __kmp_tasking_barrier(team, this_thr, gtid);
2263 KA_TRACE(20, ("__kmp_join_barrier: T#%d, old team = %d, old task_team = "
2266 team->t.t_task_team[this_thr->th.th_task_state],
2268 KMP_DEBUG_ASSERT_TASKTEAM_INVARIANT(team, this_thr);
2273 access it when the team struct is not guaranteed to exist. Doing these
2280 team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
2282 team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
2284 this_thr->th.th_team_bt_intervals = KMP_BLOCKTIME_INTERVAL(team, tid);
2322 /* From this point on, the team data structure may be deallocated at any time
2324 threads. Any per-team data items that need to be referenced before the
2328 __kmp_task_team_wait(this_thr, team USE_ITT_BUILD_ARG(itt_sync_obj));
2331 KMP_CHECK_UPDATE(team->t.t_display_affinity, 0);
2337 for (int i = 0; i < team->t.t_nproc; ++i) {
2338 kmp_info_t *team_thread = team->t.t_threads[i];
2357 this_thr->th.th_teams_size.nteams == 1) && // or inside single team
2358 team->t.t_active_level == 1) {
2360 ident_t *loc = team->t.t_ident;
2361 kmp_info_t **other_threads = team->t.t_threads;
2404 ("__kmp_join_barrier: T#%d(%d:%d) says all %d team threads arrived\n",
2422 kmp_team_t *team = (tid == 0) ? this_thr->th.th_team : NULL;
2427 if (team)
2429 (team != NULL) ? team->t.t_id : -1, tid));
2442 KMP_DEBUG_ASSERT(team);
2443 kmp_info_t **other_threads = team->t.t_threads;
2449 for (i = 1; i < team->t.t_nproc; ++i) {
2453 gtid, team->t.t_id, other_threads[i]->th.th_info.ds.ds_gtid,
2454 team->t.t_id, other_threads[i]->th.th_info.ds.ds_tid,
2459 KMP_DEBUG_ASSERT(other_threads[i]->th.th_team == team);
2464 __kmp_task_team_setup(this_thr, team);
2468 __kmp_wait_template() can access it when the team struct is not
2475 team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
2477 team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
2479 this_thr->th.th_team_bt_intervals = KMP_BLOCKTIME_INTERVAL(team, tid);
2519 ompt_data_t *task_data = (team)
2528 codeptr = team ? team->t.ompt_team_info.master_return_address : NULL;
2566 /* We can now assume that a valid team structure has been allocated by the
2568 however, may not be part of the team, so we can't blindly assume that the
2569 team pointer is non-null. */
2570 team = (kmp_team_t *)TCR_PTR(this_thr->th.th_team);
2571 KMP_DEBUG_ASSERT(team != NULL);
2588 __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[tid], team,
2590 copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
2591 &team->t.t_threads[0]
2599 __kmp_task_team_sync(this_thr, team);
2603 kmp_proc_bind_t proc_bind = team->t.t_proc_bind;
2606 if (__kmp_affinity.type == affinity_balanced && team->t.t_size_changed) {
2607 __kmp_balanced_affinity(this_thr, team->t.t_nproc);
2621 if (team->t.t_display_affinity
2623 || (__kmp_affinity.type == affinity_balanced && team->t.t_size_changed)
2628 this_thr->th.th_prev_num_threads = team->t.t_nproc;
2629 this_thr->th.th_prev_level = team->t.t_level;
2633 KMP_CHECK_UPDATE(this_thr->th.th_def_allocator, team->t.t_def_allocator);
2645 team->t.t_id, tid));
2648 void __kmp_setup_icv_copy(kmp_team_t *team, int new_nproc,
2652 KMP_DEBUG_ASSERT(team && new_nproc && new_icvs);
2662 KMP_DEBUG_ASSERT(team->t.t_threads[0]); // The threads arrays should be
2665 &team->t.t_threads[0]->th.th_bar[bs_forkjoin_barrier].bb.th_fixed_icvs,
2667 KF_TRACE(10, ("__kmp_setup_icv_copy: PULL: T#%d this_thread=%p team=%p\n", 0,
2668 team->t.t_threads[0], team));
2672 KF_TRACE(10, ("__kmp_setup_icv_copy: PUSH: T#%d this_thread=%p team=%p\n", 0,
2673 team->t.t_threads[0], team));
2678 KMP_DEBUG_ASSERT(team->t.t_threads[0]); // The threads arrays should be
2682 KF_TRACE(10, ("__kmp_setup_icv_copy: LINEAR: T#%d this_thread=%p team=%p\n",
2683 f, team->t.t_threads[f], team));
2684 __kmp_init_implicit_task(loc, team->t.t_threads[f], team, f, FALSE);
2685 ngo_store_icvs(&team->t.t_implicit_task_taskdata[f].td_icvs, new_icvs);
2686 KF_TRACE(10, ("__kmp_setup_icv_copy: LINEAR: T#%d this_thread=%p team=%p\n",
2687 f, team->t.t_threads[f], team));
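The PULL, PUSH, and LINEAR traces in __kmp_setup_icv_copy suggest two ways of distributing new ICVs at fork: either the primary writes every worker's implicit-task slot itself before the workers are released, or it publishes one master copy that each worker copies out for itself after the fork barrier. The sketch below shows only that data movement, with an invented two-field Icvs struct and function names; real libomp ICVs carry many more fields, and the hand-off is synchronized by the fork barrier itself.

#include <cstddef>
#include <cstdio>
#include <vector>

struct Icvs {
  int nthreads_var;
  int dynamic;
};

// "Push": primary-side loop, run before the workers are woken.
void push_icvs(std::vector<Icvs> &implicit_task_icvs, const Icvs &new_icvs) {
  for (Icvs &slot : implicit_task_icvs)
    slot = new_icvs; // every worker finds its slot ready when it wakes
}

// "Pull": each worker runs this for itself right after it is released.
void pull_icvs(Icvs &my_slot, const Icvs &master_copy) {
  my_slot = master_copy; // copy cost is paid in parallel, one thread each
}

int main() {
  const Icvs new_icvs{8, 1};
  std::vector<Icvs> team_slots(4);

  push_icvs(team_slots, new_icvs); // strategy 1: serial copy by the primary

  Icvs master_copy = new_icvs;     // strategy 2: publish once...
  for (std::size_t tid = 1; tid < team_slots.size(); ++tid)
    pull_icvs(team_slots[tid], master_copy); // ...workers copy after release

  std::printf("worker 3 sees nthreads_var=%d dynamic=%d\n",
              team_slots[3].nthreads_var, team_slots[3].dynamic);
}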