Lines Matching defs:team

210 void __kmp_dist_barrier_wakeup(enum barrier_type bt, kmp_team_t *team,
217 kmp_info_t **other_threads = team->t.t_threads;
230 kmp_team_t *team;
237 team = this_thr->th.th_team;
239 other_threads = team->t.t_threads;
240 b = team->t.b;
247 gtid, team->t.t_id, tid, bt));
372 gtid, team->t.t_id, tid, bt));
379 kmp_team_t *team;
392 // workers and non-master group leaders need to check their presence in team
396 // Thread is not in use in a team. Wait on location in tid's thread
399 // state to get to 1 which is waiting on go and being in the team
432 // At this point, the thread thinks it is in use in a team, or in
433 // transition to be used in a team, but it might have reached this barrier
434 // before it was marked unused by the team. Unused threads are awoken and
436 // this point by being picked up for use by a different team. Either way,
439 team = this_thr->th.th_team;
441 KMP_DEBUG_ASSERT(team);
442 b = team->t.b;
451 // Wait on go flag on team
465 // use in the team, as it could have been woken up for the purpose of
466 // changing team size, or reaping threads at shutdown.
487 __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[tid], team,
489 copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
490 (kmp_internal_control_t *)team->t.b->team_icvs);
492 &team->t.t_implicit_task_taskdata[tid].td_icvs);
502 __kmp_dist_barrier_wakeup(bt, team, tid + 1, group_end, 1, tid);
505 team = this_thr->th.th_team;
506 b = team->t.b;
513 &team->t.t_implicit_task_taskdata[tid].td_icvs);
524 __kmp_dist_barrier_wakeup(bt, team, tid + b->threads_per_group, nproc,
542 __kmp_dist_barrier_wakeup(bt, team, tid + 1, group_end, 1, tid);
551 gtid, team->t.t_id, tid, bt));
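
The __kmp_dist_barrier_release fragments above describe a worker that may not currently belong to a team: it waits on its own thread-local state until it is either picked up by a team or reaped at shutdown, and only then dereferences th_team. A minimal sketch of that wait-until-claimed step, assuming standard C++ atomics; WorkerState and wait_until_claimed are invented names, not part of the runtime:

    #include <atomic>
    #include <thread>

    enum class WorkerState { kUnused, kClaimed, kTerminate }; // invented states

    // Spin (yielding) until this worker is claimed by a team or told to shut
    // down; only after kClaimed is it safe to dereference team-owned data.
    inline WorkerState wait_until_claimed(std::atomic<WorkerState> &state) {
      WorkerState s;
      while ((s = state.load(std::memory_order_acquire)) == WorkerState::kUnused)
        std::this_thread::yield();
      return s; // kClaimed or kTerminate
    }
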
560 kmp_team_t *team = this_thr->th.th_team;
562 kmp_info_t **other_threads = team->t.t_threads;
567 gtid, team->t.t_id, tid, bt));
583 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(0, team),
584 team->t.t_id, 0, &thr_bar->b_arrived, thr_bar->b_arrived,
587 /* After performing this write, a worker thread may not assume that the team
593 kmp_balign_team_t *team_bar = &team->t.t_bar[bt];
596 // Don't have to worry about sleep bit here or atomic since team setting
599 // Collect all the worker team member threads.
608 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(i, team),
609 team->t.t_id, i,
634 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(i, team),
635 team->t.t_id, i));
643 // Don't have to worry about sleep bit here or atomic since team setting
645 KA_TRACE(20, ("__kmp_linear_barrier_gather: T#%d(%d:%d) set team %d "
647 gtid, team->t.t_id, tid, team->t.t_id, &team_bar->b_arrived,
653 gtid, team->t.t_id, tid, bt));
663 kmp_team_t *team;
670 team = __kmp_threads[gtid]->th.th_team;
671 KMP_DEBUG_ASSERT(team != NULL);
672 other_threads = team->t.t_threads;
676 gtid, team->t.t_id, tid, bt));
683 ngo_load(&team->t.t_implicit_task_taskdata[0].td_icvs);
685 __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[i],
686 team, i, FALSE);
687 ngo_store_icvs(&team->t.t_implicit_task_taskdata[i].td_icvs,
688 &team->t.t_implicit_task_taskdata[0].td_icvs);
706 gtid, team->t.t_id, tid, other_threads[i]->th.th_info.ds.ds_gtid,
707 team->t.t_id, i, &other_threads[i]->th.th_bar[bt].bb.b_go,
746 // The worker thread may now assume that the team is valid.
749 team = __kmp_threads[gtid]->th.th_team;
751 KMP_DEBUG_ASSERT(team != NULL);
755 gtid, team->t.t_id, tid, &thr_bar->b_go, KMP_INIT_BARRIER_STATE));
761 gtid, team->t.t_id, tid, bt));
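
Taken together, the __kmp_linear_barrier_* lines reduce to a familiar shape: every worker publishes its arrival in its own flag and then spins on a go flag, while the primary sweeps the workers' flags linearly and finally publishes the release. A stripped-down sketch of that shape; ToyBarrier, worker_wait and primary_gather_release are illustrative names, and the epoch handling is simplified compared with KMP_BARRIER_STATE_BUMP:

    #include <atomic>
    #include <thread>
    #include <vector>

    struct ToyBarrier {                       // illustrative only
      std::vector<std::atomic<int>> arrived;  // one arrival flag per thread
      std::atomic<int> go{0};                 // release flag set by the primary
      explicit ToyBarrier(int n) : arrived(n) {
        for (auto &a : arrived) a.store(0, std::memory_order_relaxed);
      }
    };

    // Worker side: announce arrival, then spin until the primary releases.
    // epoch starts at 1 and increments by 1 for every successive barrier.
    inline void worker_wait(ToyBarrier &b, int tid, int epoch) {
      b.arrived[tid].store(epoch, std::memory_order_release);
      while (b.go.load(std::memory_order_acquire) != epoch)
        std::this_thread::yield();
    }

    // Primary side: wait for every worker linearly, then publish the release.
    inline void primary_gather_release(ToyBarrier &b, int nproc, int epoch) {
      for (int i = 1; i < nproc; ++i)
        while (b.arrived[i].load(std::memory_order_acquire) != epoch)
          std::this_thread::yield();
      b.go.store(epoch, std::memory_order_release);
    }

In the listing the per-thread flags live in each thread's barrier state (b_arrived, b_go) and the epoch is advanced by KMP_BARRIER_STATE_BUMP, but the gather/release structure is the same.
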
798 kmp_team_t *team = this_thr->th.th_team;
800 kmp_info_t **other_threads = team->t.t_threads;
810 gtid, team->t.t_id, tid, bt));
825 new_state = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
839 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
840 team->t.t_id, child_tid, &child_bar->b_arrived, new_state));
855 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
856 team->t.t_id, child_tid));
874 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(parent_tid, team),
875 team->t.t_id, parent_tid, &thr_bar->b_arrived, thr_bar->b_arrived,
879 /* After performing this write, a worker thread may not assume that the team
885 // Need to update the team arrived pointer if we are the primary thread
887 team->t.t_bar[bt].b_arrived = new_state;
889 team->t.t_bar[bt].b_arrived += KMP_BARRIER_STATE_BUMP;
890 KA_TRACE(20, ("__kmp_tree_barrier_gather: T#%d(%d:%d) set team %d "
892 gtid, team->t.t_id, tid, team->t.t_id,
893 &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived));
897 gtid, team->t.t_id, tid, bt));
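
The comment repeated across the gather routines ("After performing this write, a worker thread may not assume that the team ...") encodes an ordering rule: anything the worker still needs from the team must be copied out before it publishes its arrival, because the primary may tear the team down immediately afterwards. A hedged illustration of that rule; TeamData, my_flag and arrive_and_detach are invented for the example:

    #include <atomic>

    struct TeamData {            // stand-in for per-team state, not kmp_team_t
      int blocktime_interval;
    };

    // Copy whatever will be needed later out of the team, *then* publish
    // arrival. After the release store the team pointer must be treated as
    // dangling, since the primary may free or reuse the team at any time.
    inline int arrive_and_detach(TeamData *team, std::atomic<int> &my_flag,
                                 int epoch) {
      int cached_blocktime = team->blocktime_interval; // read while still valid
      my_flag.store(epoch, std::memory_order_release); // primary may free team
      return cached_blocktime;                         // use only cached copies
    }
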
904 kmp_team_t *team;
914 tid)) { // Handle fork barrier workers who aren't part of a team yet
941 // The worker thread may now assume that the team is valid.
942 team = __kmp_threads[gtid]->th.th_team;
943 KMP_DEBUG_ASSERT(team != NULL);
949 team->t.t_id, tid, &thr_bar->b_go, KMP_INIT_BARRIER_STATE));
952 team = __kmp_threads[gtid]->th.th_team;
953 KMP_DEBUG_ASSERT(team != NULL);
956 gtid, team->t.t_id, tid, bt));
962 kmp_info_t **other_threads = team->t.t_threads;
979 __kmp_init_implicit_task(team->t.t_ident,
980 team->t.t_threads[child_tid], team,
982 copy_icvs(&team->t.t_implicit_task_taskdata[child_tid].td_icvs,
983 &team->t.t_implicit_task_taskdata[0].td_icvs);
990 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
991 team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
1002 gtid, team->t.t_id, tid, bt));
1010 kmp_team_t *team = this_thr->th.th_team;
1012 kmp_info_t **other_threads = team->t.t_threads;
1023 gtid, team->t.t_id, tid, bt));
1048 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(parent_tid, team),
1049 team->t.t_id, parent_tid, &thr_bar->b_arrived,
1054 loop), a worker thread may not assume that the team is valid any more
1063 new_state = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
1079 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
1080 team->t.t_id, child_tid, &child_bar->b_arrived, new_state));
1096 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
1097 team->t.t_id, child_tid));
1108 // Need to update the team arrived pointer if we are the primary thread
1110 team->t.t_bar[bt].b_arrived += KMP_BARRIER_STATE_BUMP;
1112 team->t.t_bar[bt].b_arrived = new_state;
1113 KA_TRACE(20, ("__kmp_hyper_barrier_gather: T#%d(%d:%d) set team %d "
1115 gtid, team->t.t_id, tid, team->t.t_id,
1116 &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived));
1120 gtid, team->t.t_id, tid, bt));
1129 kmp_team_t *team;
1145 team = __kmp_threads[gtid]->th.th_team;
1146 KMP_DEBUG_ASSERT(team != NULL);
1149 gtid, team->t.t_id, tid, bt));
1153 &team->t.t_implicit_task_taskdata[tid].td_icvs);
1156 } else { // Handle fork barrier workers who aren't part of a team yet
1182 // The worker thread may now assume that the team is valid.
1183 team = __kmp_threads[gtid]->th.th_team;
1184 KMP_DEBUG_ASSERT(team != NULL);
1190 gtid, team->t.t_id, tid, &thr_bar->b_go, KMP_INIT_BARRIER_STATE));
1194 other_threads = team->t.t_threads;
1256 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
1257 team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
1268 __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[tid], team, tid,
1270 copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
1277 gtid, team->t.t_id, tid, bt));
1284 Performs the minimum amount of initialization required based on how the team
1286 traditional wake-up mechanisms. For example, if the team size increases,
1287 threads already in the team will respond to on-core wakeup on their parent
1288 thread, but threads newly added to the team will only be listening on the
1293 int tid, kmp_team_t *team) {
1295 bool uninitialized = thr_bar->team == NULL;
1296 bool team_changed = team != thr_bar->team;
1333 thr_bar->team = team;
1335 &team->t.t_threads[thr_bar->parent_tid]->th.th_bar[bt].bb;
1338 thr_bar->team = team;
1340 &team->t.t_threads[thr_bar->parent_tid]->th.th_bar[bt].bb;
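
The doc comment above __kmp_init_hierarchical_barrier_thread explains that the routine does only as much re-initialization as the change in the team requires, and reports whether old leaf children must be woken through both the on-core location and the traditional flag (the case where the team grew). A small sketch of that decision, assuming a cached team pointer and team size; toy_bar_state and toy_init_hier_state are made-up names and the exact return condition in the runtime may differ:

    struct toy_bar_state {          // invented stand-in for per-thread barrier
      const void *team = nullptr;   // team this state was last initialized for
      int nproc = 0;                // team size seen at that time
    };

    // Decide how much (re)initialization is needed and whether old leaves must
    // be woken by both mechanisms: only threads already present know about the
    // on-core location, so a pure size increase forces the dual wake-up the
    // doc comment describes; a brand-new or fully changed team does not.
    inline bool toy_init_hier_state(toy_bar_state *bar, const void *team,
                                    int nproc) {
      bool uninitialized = (bar->team == nullptr);
      bool team_changed = (team != bar->team);
      bool size_increased = (nproc > bar->nproc);
      if (uninitialized || team_changed || size_increased) {
        bar->team = team;
        bar->nproc = nproc;
      }
      return size_increased && !uninitialized && !team_changed;
    }
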
1361 kmp_team_t *team = this_thr->th.th_team;
1364 kmp_info_t **other_threads = team->t.t_threads;
1367 int level = team->t.t_level;
1379 gtid, team->t.t_id, tid, bt));
1390 team);
1395 (kmp_uint64)team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
1403 : team->t.t_bar[bt].b_arrived | thr_bar->leaf_state;
1406 gtid, team->t.t_id, tid));
1416 gtid, team->t.t_id, tid,
1417 __kmp_gtid_from_tid(child_tid, team), team->t.t_id,
1440 gtid, team->t.t_id, tid,
1441 __kmp_gtid_from_tid(child_tid, team), team->t.t_id,
1448 gtid, team->t.t_id, tid,
1449 __kmp_gtid_from_tid(child_tid, team), team->t.t_id,
1469 gtid, team->t.t_id, tid,
1470 __kmp_gtid_from_tid(child_tid, team), team->t.t_id,
1477 gtid, team->t.t_id, tid,
1478 __kmp_gtid_from_tid(child_tid, team), team->t.t_id,
1492 gtid, team->t.t_id, tid,
1493 __kmp_gtid_from_tid(thr_bar->parent_tid, team), team->t.t_id,
1497 not assume that the team is valid any more - it could be deallocated by
1507 thr_bar->b_arrived = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
1513 } else { // Primary thread needs to update the team's b_arrived value
1514 team->t.t_bar[bt].b_arrived = new_state;
1515 KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) set team %d "
1517 gtid, team->t.t_id, tid, team->t.t_id,
1518 &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived));
1520 // Is the team access below unsafe or just technically invalid?
1523 gtid, team->t.t_id, tid, bt));
1530 kmp_team_t *team;
1536 team = __kmp_threads[gtid]->th.th_team;
1537 KMP_DEBUG_ASSERT(team != NULL);
1540 gtid, team->t.t_id, tid, bt));
1545 thr_bar->team == NULL) {
1573 // The worker thread may now assume that the team is valid.
1574 team = __kmp_threads[gtid]->th.th_team;
1575 KMP_DEBUG_ASSERT(team != NULL);
1581 gtid, team->t.t_id, tid, &thr_bar->b_go, KMP_INIT_BARRIER_STATE));
1586 int level = team->t.t_level;
1587 if (team->t.t_threads[0]
1589 if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
1600 // If the team size has increased, we still communicate with old leaves via
1605 tid, team);
1606 // But if the entire team changes, we won't use oncore barrier at all
1612 __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[tid], team, tid,
1617 &team->t.t_implicit_task_taskdata[tid].td_icvs);
1623 copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
1631 copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
1655 &team->t.t_threads[child_tid]->th.th_bar[bt].bb;
1659 gtid, team->t.t_id, tid,
1660 __kmp_gtid_from_tid(child_tid, team), team->t.t_id,
1673 // We test team_change on the off-chance that the level 1 team changed.
1685 kmp_info_t *child_thr = team->t.t_threads[child_tid];
1691 gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
1692 team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
1711 kmp_info_t *child_thr = team->t.t_threads[child_tid];
1715 gtid, team->t.t_id, tid,
1716 __kmp_gtid_from_tid(child_tid, team), team->t.t_id,
1728 copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
1734 gtid, team->t.t_id, tid, bt));
1774 kmp_team_t *team = this_thr->th.th_team;
1830 if (!team->t.t_serialized) {
1840 __kmp_tasking_barrier(team, this_thr, gtid);
1847 access it when the team struct is not guaranteed to exist. */
1853 team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
1855 team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
1857 this_thr->th.th_team_bt_intervals = KMP_BLOCKTIME_INTERVAL(team, tid);
1867 if (KMP_MASTER_TID(tid)) { // Primary thread counter stored in team struct
1868 team->t.t_bar[bt].b_master_arrived += 1;
1879 __kmp_task_team_setup(this_thr, team);
1922 __kmp_task_team_wait(this_thr, team USE_ITT_BUILD_ARG(itt_sync_obj));
1927 team->t.t_bar[bt].b_team_arrived += 1;
1931 kmp_int32 cancel_request = KMP_ATOMIC_LD_RLX(&team->t.t_cancel_request);
1935 KMP_ATOMIC_ST_RLX(&team->t.t_cancel_request, cancel_noreq);
1951 this_thr->th.th_teams_size.nteams == 1) && // or inside single team
1952 team->t.t_active_level == 1) {
1955 kmp_info_t **other_threads = team->t.t_threads;
2034 __kmp_task_team_sync(this_thr, team);
2061 __kmp_task_team_wait(this_thr, team USE_ITT_BUILD_ARG(itt_sync_obj));
2062 __kmp_task_team_setup(this_thr, team);
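
Within __kmp_barrier the listing also shows the team-wide cancellation request being read with a relaxed atomic load and reset to cancel_noreq. A simplified check-and-clear sketch; toy_cancel_kind and toy_check_cancellation are invented, and the exact conditions under which the runtime resets the flag are simplified here:

    #include <atomic>

    enum toy_cancel_kind : int {          // invented values for the sketch
      kCancelNoReq = 0,
      kCancelLoop = 1,
      kCancelSections = 2,
    };

    // After the gather phase every thread observes the same pending request;
    // requests that target a construct ending at this barrier are cleared so
    // the next barrier starts clean. Relaxed ordering suffices because the
    // barrier itself already ordered the surrounding accesses.
    inline int toy_check_cancellation(std::atomic<int> &cancel_request) {
      int req = cancel_request.load(std::memory_order_relaxed);
      if (req == kCancelLoop || req == kCancelSections)
        cancel_request.store(kCancelNoReq, std::memory_order_relaxed);
      return req;
    }
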
2136 kmp_team_t *team = this_thr->th.th_team;
2138 if (!team->t.t_serialized) {
2169 __kmp_task_team_sync(this_thr, team);
2182 kmp_team_t *team;
2201 team = this_thr->th.th_team;
2202 KMP_DEBUG_ASSERT(nproc == team->t.t_nproc);
2205 team_id = team->t.t_id;
2207 if (master_thread != team->t.t_threads[0]) {
2211 KMP_DEBUG_ASSERT(master_thread == team->t.t_threads[0]);
2217 KMP_DEBUG_ASSERT(this_thr == team->t.t_threads[tid]);
2231 codeptr = team->t.ompt_team_info.master_return_address;
2256 __kmp_tasking_barrier(team, this_thr, gtid);
2262 KA_TRACE(20, ("__kmp_join_barrier: T#%d, old team = %d, old task_team = "
2265 team->t.t_task_team[this_thr->th.th_task_state],
2267 KMP_DEBUG_ASSERT_TASKTEAM_INVARIANT(team, this_thr);
2272 access it when the team struct is not guaranteed to exist. Doing these
2279 team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
2281 team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
2283 this_thr->th.th_team_bt_intervals = KMP_BLOCKTIME_INTERVAL(team, tid);
2321 /* From this point on, the team data structure may be deallocated at any time
2323 threads. Any per-team data items that need to be referenced before the
2327 __kmp_task_team_wait(this_thr, team USE_ITT_BUILD_ARG(itt_sync_obj));
2330 KMP_CHECK_UPDATE(team->t.t_display_affinity, 0);
2336 for (int i = 0; i < team->t.t_nproc; ++i) {
2337 kmp_info_t *team_thread = team->t.t_threads[i];
2356 this_thr->th.th_teams_size.nteams == 1) && // or inside single team
2357 team->t.t_active_level == 1) {
2359 ident_t *loc = team->t.t_ident;
2360 kmp_info_t **other_threads = team->t.t_threads;
2403 ("__kmp_join_barrier: T#%d(%d:%d) says all %d team threads arrived\n",
2421 kmp_team_t *team = (tid == 0) ? this_thr->th.th_team : NULL;
2426 if (team)
2428 (team != NULL) ? team->t.t_id : -1, tid));
2441 KMP_DEBUG_ASSERT(team);
2442 kmp_info_t **other_threads = team->t.t_threads;
2448 for (i = 1; i < team->t.t_nproc; ++i) {
2452 gtid, team->t.t_id, other_threads[i]->th.th_info.ds.ds_gtid,
2453 team->t.t_id, other_threads[i]->th.th_info.ds.ds_tid,
2458 KMP_DEBUG_ASSERT(other_threads[i]->th.th_team == team);
2463 __kmp_task_team_setup(this_thr, team);
2467 __kmp_wait_template() can access it when the team struct is not
2474 team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
2476 team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
2478 this_thr->th.th_team_bt_intervals = KMP_BLOCKTIME_INTERVAL(team, tid);
2518 ompt_data_t *task_data = (team)
2527 codeptr = team ? team->t.ompt_team_info.master_return_address : NULL;
2565 /* We can now assume that a valid team structure has been allocated by the
2567 however, may not be part of the team, so we can't blindly assume that the
2568 team pointer is non-null. */
2569 team = (kmp_team_t *)TCR_PTR(this_thr->th.th_team);
2570 KMP_DEBUG_ASSERT(team != NULL);
2587 __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[tid], team,
2589 copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
2590 &team->t.t_threads[0]
2598 __kmp_task_team_sync(this_thr, team);
2602 kmp_proc_bind_t proc_bind = team->t.t_proc_bind;
2605 if (__kmp_affinity.type == affinity_balanced && team->t.t_size_changed) {
2606 __kmp_balanced_affinity(this_thr, team->t.t_nproc);
2620 if (team->t.t_display_affinity
2622 || (__kmp_affinity.type == affinity_balanced && team->t.t_size_changed)
2627 this_thr->th.th_prev_num_threads = team->t.t_nproc;
2628 this_thr->th.th_prev_level = team->t.t_level;
2632 KMP_CHECK_UPDATE(this_thr->th.th_def_allocator, team->t.t_def_allocator);
2644 team->t.t_id, tid));
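
The fork-barrier lines read the freshly published team pointer through TCR_PTR and assert it before use, since a released worker must not touch the team until the primary has actually published it. A minimal analogue with standard atomics; toy_team, publish_team and acquire_team are invented names:

    #include <atomic>
    #include <cassert>

    struct toy_team { int nproc; };              // invented

    std::atomic<toy_team *> g_team{nullptr};     // slot the primary publishes to

    // Primary: set up the team, then publish the pointer with release
    // semantics so workers that acquire it also see the initialized contents.
    inline void publish_team(toy_team *t) {
      g_team.store(t, std::memory_order_release);
    }

    // Worker, after being released from the fork barrier: re-read the slot
    // with acquire semantics and only then dereference it.
    inline toy_team *acquire_team() {
      toy_team *t = g_team.load(std::memory_order_acquire);
      assert(t != nullptr); // by this point the primary must have published it
      return t;
    }
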
2647 void __kmp_setup_icv_copy(kmp_team_t *team, int new_nproc,
2651 KMP_DEBUG_ASSERT(team && new_nproc && new_icvs);
2661 KMP_DEBUG_ASSERT(team->t.t_threads[0]); // The threads arrays should be
2664 &team->t.t_threads[0]->th.th_bar[bs_forkjoin_barrier].bb.th_fixed_icvs,
2666 KF_TRACE(10, ("__kmp_setup_icv_copy: PULL: T#%d this_thread=%p team=%p\n", 0,
2667 team->t.t_threads[0], team));
2671 KF_TRACE(10, ("__kmp_setup_icv_copy: PUSH: T#%d this_thread=%p team=%p\n", 0,
2672 team->t.t_threads[0], team));
2677 KMP_DEBUG_ASSERT(team->t.t_threads[0]); // The threads arrays should be
2681 KF_TRACE(10, ("__kmp_setup_icv_copy: LINEAR: T#%d this_thread=%p team=%p\n",
2682 f, team->t.t_threads[f], team));
2683 __kmp_init_implicit_task(loc, team->t.t_threads[f], team, f, FALSE);
2684 ngo_store_icvs(&team->t.t_implicit_task_taskdata[f].td_icvs, new_icvs);
2685 KF_TRACE(10, ("__kmp_setup_icv_copy: LINEAR: T#%d this_thread=%p team=%p\n",
2686 f, team->t.t_threads[f], team));
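
__kmp_setup_icv_copy, at the end of the listing, propagates new internal control variables either by letting workers pull them from thread 0, by pushing them through the fork barrier, or with a plain linear loop over the implicit tasks. A simplified version of the linear case only; toy_icvs, toy_task and toy_setup_icv_copy stand in for kmp_internal_control_t, the implicit task data and ngo_store_icvs:

    #include <cstddef>
    #include <vector>

    struct toy_icvs { int nproc; int dynamic; int blocktime; }; // invented subset

    struct toy_task { toy_icvs icvs; };          // stand-in for an implicit task

    // Linear propagation: the primary writes the new ICVs into every worker's
    // implicit task before the workers leave the fork barrier, so each thread
    // starts the parallel region with a consistent copy. Thread 0's own copy
    // is assumed to be set up already.
    inline void toy_setup_icv_copy(std::vector<toy_task> &implicit_tasks,
                                   const toy_icvs &new_icvs) {
      for (std::size_t f = 1; f < implicit_tasks.size(); ++f)
        implicit_tasks[f].icvs = new_icvs;
    }
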