Lines Matching defs:team
88 static void __kmp_initialize_team(kmp_team_t *team, int new_nproc,
92 static void __kmp_partition_places(kmp_team_t *team,
98 void __kmp_setup_icv_copy(kmp_team_t *team, int new_nproc,
112 void __kmp_resize_dist_barrier(kmp_team_t *team, int old_nthreads,
114 void __kmp_add_threads_to_team(kmp_team_t *team, int new_nthreads);
533 /* Print out the storage map for the major kmp_team_t team data structures
536 static void __kmp_print_team_storage_map(const char *header, kmp_team_t *team,
538 int num_disp_buff = team->t.t_max_nproc > 1 ? __kmp_dispatch_num_buffers : 2;
539 __kmp_print_storage_map_gtid(-1, team, team + 1, sizeof(kmp_team_t), "%s_%d",
542 __kmp_print_storage_map_gtid(-1, &team->t.t_bar[0],
543 &team->t.t_bar[bs_last_barrier],
547 __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_plain_barrier],
548 &team->t.t_bar[bs_plain_barrier + 1],
552 __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_forkjoin_barrier],
553 &team->t.t_bar[bs_forkjoin_barrier + 1],
558 __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_reduction_barrier],
559 &team->t.t_bar[bs_reduction_barrier + 1],
565 -1, &team->t.t_dispatch[0], &team->t.t_dispatch[num_thr],
569 -1, &team->t.t_threads[0], &team->t.t_threads[num_thr],
572 __kmp_print_storage_map_gtid(-1, &team->t.t_disp_buffer[0],
573 &team->t.t_disp_buffer[num_disp_buff],
650 kmp_team_t *team = __kmp_team_from_gtid(gtid);
662 if (!team->t.t_serialized) {
664 KMP_WAIT(&team->t.t_ordered.dt.t_value, __kmp_tid_from_gtid(gtid), KMP_EQ,
676 kmp_team_t *team = __kmp_team_from_gtid(gtid);
684 if (!team->t.t_serialized) {
687 /* use the tid of the next thread in this team */
689 team->t.t_ordered.dt.t_value = ((tid + 1) % team->t.t_nproc);
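The two clusters above (source lines ~650-689) show the runtime's ordered-section handshake: on entry a thread spins until t_ordered.dt.t_value equals its own tid, and on exit it hands the token to (tid + 1) % t_nproc. Below is a minimal, self-contained sketch of that token-passing idea using std::atomic and plain threads instead of the runtime's KMP_WAIT machinery; all names in it are illustrative, not kmp_* APIs.

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    // Illustrative token: the tid whose turn it is to run the ordered region.
    static std::atomic<int> ordered_token{0};

    static void worker(int tid, int nproc) {
      // Enter: spin until the token names this thread (cf. KMP_WAIT on t_value).
      while (ordered_token.load(std::memory_order_acquire) != tid)
        std::this_thread::yield();
      std::printf("ordered body executed by tid %d\n", tid);
      // Exit: pass the token to the next thread in the team.
      ordered_token.store((tid + 1) % nproc, std::memory_order_release);
    }

    int main() {
      const int nproc = 4;
      std::vector<std::thread> team;
      for (int tid = 0; tid < nproc; ++tid)
        team.emplace_back(worker, tid, nproc);
      for (auto &t : team)
        t.join();
    }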
702 kmp_team_t *team;
709 team = th->th.th_team;
714 if (team->t.t_serialized) {
720 /* try to set team count to thread count--success means thread got the
723 if (team->t.t_construct == old_this) {
724 status = __kmp_atomic_compare_store_acq(&team->t.t_construct, old_this,
730 team->t.t_active_level == 1) {
731 // Only report metadata by primary thread of active team at level 1
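The fragments from source lines ~702-731 sketch how a single-construct winner is picked: each thread tracks the construct count it has reached, and the first one to advance t_construct with an atomic compare-and-store owns the construct (a serialized team skips the race entirely). A hedged rendering of that idea with std::atomic::compare_exchange_strong standing in for __kmp_atomic_compare_store_acq; the names are illustrative.

    #include <atomic>
    #include <cstdio>
    #include <thread>
    #include <vector>

    // Illustrative stand-in for team->t.t_construct.
    static std::atomic<int> team_construct{0};

    static void encounter_single(int tid, int &this_construct) {
      int old_this = this_construct;
      ++this_construct; // every thread counts the construct it just reached
      bool won = false;
      // Only a thread whose private count matches the team count can try to claim.
      if (team_construct.load(std::memory_order_acquire) == old_this)
        won = team_construct.compare_exchange_strong(old_this, old_this + 1,
                                                     std::memory_order_acq_rel);
      if (won)
        std::printf("tid %d executes the single region\n", tid);
    }

    int main() {
      const int nproc = 4;
      std::vector<std::thread> team;
      for (int tid = 0; tid < nproc; ++tid)
        team.emplace_back([tid] {
          int this_construct = 0; // per-thread, like th_local.this_construct
          encounter_single(tid, this_construct);
        });
      for (auto &t : team)
        t.join();
    }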
762 * set_nproc is the number of threads requested for the team
900 // If it is not for initializing the hidden helper team, we need to take
939 ("__kmp_reserve_threads: T#%d serializing team after reclaiming "
956 /* Allocate threads from the thread pool and assign them to the new team. We are
959 static void __kmp_fork_team_threads(kmp_root_t *root, kmp_team_t *team,
965 KA_TRACE(10, ("__kmp_fork_team_threads: new_nprocs = %d\n", team->t.t_nproc));
971 master_th->th.th_team = team;
972 master_th->th.th_team_nproc = team->t.t_nproc;
975 master_th->th.th_dispatch = &team->t.t_dispatch[0];
977 /* make sure we are not the optimized hot team */
983 int level = team->t.t_active_level - 1; // index in array of hot teams
989 if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
990 master_th->th.th_teams_level == team->t.t_level) {
993 } // team->t.t_level will be increased inside parallel
997 // hot team has already been allocated for given level
998 KMP_DEBUG_ASSERT(hot_teams[level].hot_team == team);
999 use_hot_team = 1; // the team is ready to use
1002 hot_teams[level].hot_team = team; // remember new hot team
1003 hot_teams[level].hot_team_nth = team->t.t_nproc;
1010 use_hot_team = team == root->r.r_hot_team;
1015 team->t.t_threads[0] = master_th;
1016 __kmp_initialize_info(master_th, team, 0, master_gtid);
1019 for (i = 1; i < team->t.t_nproc; i++) {
1021 /* fork or reallocate a new thread and install it in team */
1022 kmp_info_t *thr = __kmp_allocate_thread(root, team, i);
1023 team->t.t_threads[i] = thr;
1025 KMP_DEBUG_ASSERT(thr->th.th_team == team);
1026 /* align team and thread arrived states */
1029 __kmp_gtid_from_tid(0, team), team->t.t_id, 0,
1030 __kmp_gtid_from_tid(i, team), team->t.t_id, i,
1031 team->t.t_bar[bs_forkjoin_barrier].b_arrived,
1032 team->t.t_bar[bs_plain_barrier].b_arrived));
1038 kmp_balign_t *balign = team->t.t_threads[i]->th.th_bar;
1040 balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
1043 balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
1054 __kmp_partition_places(team);
1058 if (team->t.t_nproc > 1 &&
1060 team->t.b->update_num_threads(team->t.t_nproc);
1061 __kmp_add_threads_to_team(team, team->t.t_nproc);
1068 KMP_DEBUG_ASSERT_TASKTEAM_INVARIANT(team->t.t_parent, master_th);
1071 ("__kmp_fork_team_threads: Primary T#%d pushing task_team %p / team "
1072 "%p, new task_team %p / team %p\n",
1074 team->t.t_parent, team->t.t_task_team[master_th->th.th_task_state],
1075 team));
1077 // Store primary thread's current task state on new team
1078 KMP_CHECK_UPDATE(team->t.t_primary_task_state,
1081 // Restore primary thread's task state to hot team's state
1083 if (team->t.t_nproc > 1) {
1084 KMP_DEBUG_ASSERT(team->t.t_threads[1]->th.th_task_state == 0 ||
1085 team->t.t_threads[1]->th.th_task_state == 1);
1087 team->t.t_threads[1]->th.th_task_state);
1092 // Store primary thread's current task_state on new team
1093 KMP_CHECK_UPDATE(team->t.t_primary_task_state,
1095 // Are not using hot team, so set task state to 0.
1100 if (__kmp_display_affinity && team->t.t_display_affinity != 1) {
1101 for (i = 0; i < team->t.t_nproc; i++) {
1102 kmp_info_t *thr = team->t.t_threads[i];
1103 if (thr->th.th_prev_num_threads != team->t.t_nproc ||
1104 thr->th.th_prev_level != team->t.t_level) {
1105 team->t.t_display_affinity = 1;
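Several fragments in the __kmp_fork_team_threads cluster above (source lines ~1026-1043) show what happens when a thread is installed into a team: its per-barrier b_arrived counters are copied from the team's t_bar[] so the newcomer is in phase with threads already waiting at the fork/join barrier. A toy sketch of that alignment step follows, assuming a simplified barrier layout; the names mirror the fragments but this is not the runtime's real data layout.

    #include <cstdint>
    #include <cstdio>

    enum { bs_plain_barrier, bs_forkjoin_barrier, bs_reduction_barrier, bs_last_barrier };

    struct team_barrier { std::uint64_t b_arrived = 0; };   // team-wide arrival count
    struct thread_barrier { std::uint64_t b_arrived = 0; }; // thread's cached copy

    struct toy_team { team_barrier t_bar[bs_last_barrier]; };
    struct toy_thread { thread_barrier th_bar[bs_last_barrier]; };

    // When a (possibly recycled) thread joins the team, align its arrival counters
    // with the team's so it neither skips nor re-waits a barrier phase.
    static void align_barrier_states(const toy_team &team, toy_thread &thr) {
      for (int b = 0; b < bs_last_barrier; ++b)
        thr.th_bar[b].b_arrived = team.t_bar[b].b_arrived;
    }

    int main() {
      toy_team team;
      team.t_bar[bs_forkjoin_barrier].b_arrived = 6; // team already past some phases
      toy_thread worker;                             // fresh or pooled thread, counter stale
      align_barrier_states(team, worker);
      std::printf("worker fork/join arrived = %llu\n",
                  (unsigned long long)worker.th_bar[bs_forkjoin_barrier].b_arrived);
    }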
1115 // Propagate any changes to the floating point control registers out to the team
1116 // We try to avoid unnecessary writes to the relevant cache line in the team
1118 inline static void propagateFPControl(kmp_team_t *team) {
1131 // our objective is the same. We have to ensure that the values in the team
1136 // threads in the team to have to read it again.
1137 KMP_CHECK_UPDATE(team->t.t_x87_fpu_control_word, x87_fpu_control_word);
1138 KMP_CHECK_UPDATE(team->t.t_mxcsr, mxcsr);
1141 KMP_CHECK_UPDATE(team->t.t_fp_control_saved, TRUE);
1143 // Similarly here. Don't write to this cache-line in the team structure
1145 KMP_CHECK_UPDATE(team->t.t_fp_control_saved, FALSE);
1150 // the team.
1151 inline static void updateHWFPControl(kmp_team_t *team) {
1152 if (__kmp_inherit_fp_control && team->t.t_fp_control_saved) {
1153 // Only reset the fp control regs if they have been changed in the team.
1161 if (team->t.t_x87_fpu_control_word != x87_fpu_control_word) {
1163 __kmp_load_x87_fpu_control_word(&team->t.t_x87_fpu_control_word);
1166 if (team->t.t_mxcsr != mxcsr) {
1167 __kmp_load_mxcsr(&team->t.t_mxcsr);
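propagateFPControl and updateHWFPControl (source lines ~1115-1167) only store the x87 control word and MXCSR into the team struct when the value actually differs, so the shared cache line is not dirtied on every fork. A minimal sketch of that check-before-write idiom; check_update below is an illustrative stand-in for the KMP_CHECK_UPDATE macro, not the real one.

    #include <cstdint>
    #include <cstdio>

    // Illustrative stand-in for KMP_CHECK_UPDATE: write only if the value changed,
    // so an unchanged team field never dirties its cache line.
    template <typename T> static void check_update(T &dst, T src) {
      if (dst != src)
        dst = src;
    }

    struct team_fp_state {          // loosely modeled on the kmp_team_t FP fields
      std::int16_t x87_control = 0; // cf. t_x87_fpu_control_word
      std::uint32_t mxcsr = 0;      // cf. t_mxcsr
      bool fp_control_saved = false;
    };

    // Primary thread publishes its current FP environment to the team.
    static void propagate_fp_control(team_fp_state &team, std::int16_t x87,
                                     std::uint32_t mxcsr) {
      check_update(team.x87_control, x87);
      check_update(team.mxcsr, mxcsr);
      check_update(team.fp_control_saved, true);
    }

    int main() {
      team_fp_state team;
      propagate_fp_control(team, 0x037f, 0x1f80); // typical default control words
      propagate_fp_control(team, 0x037f, 0x1f80); // second call writes nothing
      std::printf("saved=%d mxcsr=0x%x\n", team.fp_control_saved, team.mxcsr);
    }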
1176 static void __kmp_alloc_argv_entries(int argc, kmp_team_t *team,
1179 /* Run a parallel region that has been serialized, so runs only in a team of the
1199 /* utilize the serialized team held by this thread */
1243 /* this serial team was already used
1259 /* setup new serialized team and install it */
1267 ("__kmpc_serialized_parallel: T#%d allocated new serial team %p\n",
1276 ("__kmpc_serialized_parallel: T#%d reusing cached serial team %p\n",
1280 /* we have to initialize this serial team */
1292 // Save previous team's task state on serial team structure
1307 team->t.t_serialized? */
1356 /* this serialized team is already being used,
1378 "of serial team %p to %d\n",
1392 /* allocate/push task team stack */
1445 // Test if this fork is for a team closely nested in a teams construct
1462 // The team is actual (hot), all workers are ready at the fork barrier.
1463 // No lock needed to initialize the team a bit, then free workers.
1574 // If the threads allocated to the team are less than the thread limit, update
1575 // the thread limit here. th_teams_size.nth is specific to this team nested
1576 // in a teams construct, the team is fully created, and we're about to do
1590 /* Change number of threads in the team if requested */
1596 // even when the team size appears not to have changed from the max.
1607 // Keep extra threads hot in the team for possible next parallels
1673 KF_TRACE(10, ("__kmp_fork_in_teams: before internal fork: root=%p, team=%p, "
1677 KF_TRACE(10, ("__kmp_fork_in_teams: after internal fork: root=%p, team=%p, "
1710 kmp_team_t *team;
1738 // Get args from parent team for teams construct
1800 team = master_th->th.th_team;
1801 // team->t.t_pkfn = microtask;
1802 team->t.t_invoke = invoker;
1803 __kmp_alloc_argv_entries(argc, team, TRUE);
1804 team->t.t_argc = argc;
1805 argv = (void **)team->t.t_argv;
1810 team->t.t_level--;
1935 kmp_team_t *team;
2049 // we are allocating the team
2061 KC_TRACE(10, ("__kmp_fork_call: T#%d serializing team\n", gtid));
2086 // execution it will be freed later after team of threads created
2142 // Figure out the proc_bind_policy for the new team.
2188 /* allocate a new parallel team */
2190 team = __kmp_allocate_team(root, nthreads, nthreads,
2197 copy_icvs((kmp_internal_control_t *)team->t.b->team_icvs, &new_icvs);
2199 /* allocate a new parallel team */
2201 team = __kmp_allocate_team(root, nthreads, nthreads,
2209 copy_icvs((kmp_internal_control_t *)team->t.b->team_icvs,
2213 10, ("__kmp_fork_call: after __kmp_allocate_team - team = %p\n", team));
2215 /* setup the new team */
2216 KMP_CHECK_UPDATE(team->t.t_master_tid, master_tid);
2217 KMP_CHECK_UPDATE(team->t.t_master_this_cons, master_this_cons);
2218 KMP_CHECK_UPDATE(team->t.t_ident, loc);
2219 KMP_CHECK_UPDATE(team->t.t_parent, parent_team);
2220 KMP_CHECK_UPDATE_SYNC(team->t.t_pkfn, microtask);
2222 KMP_CHECK_UPDATE_SYNC(team->t.ompt_team_info.master_return_address,
2225 KMP_CHECK_UPDATE(team->t.t_invoke, invoker); // TODO move to root, maybe
2229 KMP_CHECK_UPDATE(team->t.t_level, new_level);
2231 KMP_CHECK_UPDATE(team->t.t_active_level, new_level);
2235 KMP_CHECK_UPDATE(team->t.t_level, new_level);
2237 KMP_CHECK_UPDATE(team->t.t_active_level, new_level);
2241 KMP_CHECK_UPDATE(team->t.t_sched.sched, new_sched.sched);
2243 KMP_CHECK_UPDATE(team->t.t_cancel_request, cancel_noreq);
2244 KMP_CHECK_UPDATE(team->t.t_def_allocator, master_th->th.th_def_allocator);
2246 // Check if hot team has potentially outdated list, and if so, free it
2247 if (team->t.t_nested_nth &&
2248 team->t.t_nested_nth != parent_team->t.t_nested_nth) {
2249 KMP_INTERNAL_FREE(team->t.t_nested_nth->nth);
2250 KMP_INTERNAL_FREE(team->t.t_nested_nth);
2251 team->t.t_nested_nth = NULL;
2253 team->t.t_nested_nth = parent_team->t.t_nested_nth;
2257 team->t.t_nested_nth = nested_nth;
2264 // Update the floating point rounding in the team if required.
2265 propagateFPControl(team);
2273 ("__kmp_fork_call: T#%d(%d:%d)->(%d:0) created a team of %d threads\n",
2274 gtid, parent_team->t.t_id, team->t.t_master_tid, team->t.t_id,
2275 team->t.t_nproc));
2276 KMP_DEBUG_ASSERT(team != root->r.r_hot_team ||
2277 (team->t.t_master_tid == 0 &&
2278 (team->t.t_parent == root->r.r_root_team ||
2279 team->t.t_parent->t.t_serialized)));
2283 argv = (void **)team->t.t_argv;
2292 // Get args from parent team for teams construct
2293 KMP_CHECK_UPDATE(argv[i], team->t.t_parent->t.t_argv[i]);
2298 KMP_CHECK_UPDATE(team->t.t_master_active, master_active);
2302 __kmp_fork_team_threads(root, team, master_th, gtid, !ap);
2303 __kmp_setup_icv_copy(team, nthreads,
2313 if (team->t.t_active_level == 1 // only report frames at level 1
2325 team->t.t_region_time = tmp_time;
2332 __kmp_itt_region_forking(gtid, team->t.t_nproc, 0);
2338 KMP_DEBUG_ASSERT(team == __kmp_threads[gtid]->th.th_team);
2341 ("__kmp_internal_fork : root=%p, team=%p, master_th=%p, gtid=%d\n",
2342 root, team, master_th, gtid));
2348 KMP_DEBUG_ASSERT(team->t.t_stack_id == NULL);
2349 team->t.t_stack_id = __kmp_itt_stack_caller_create();
2352 // current team will be used for parallel inside the teams;
2364 __kmp_internal_fork(loc, gtid, team);
2365 KF_TRACE(10, ("__kmp_internal_fork : after : root=%p, team=%p, "
2367 root, team, master_th, gtid));
2377 team->t.t_id, team->t.t_pkfn));
2388 if (!team->t.t_invoke(gtid)) {
2400 team->t.t_id, team->t.t_pkfn));
2415 kmp_team_t *team) {
2418 ((team->t.t_serialized) ? ompt_state_work_serial
2423 kmp_team_t *team, ompt_data_t *parallel_data,
2432 __kmp_join_restore_state(thread, team);
2444 kmp_team_t *team;
2455 team = master_th->th.th_team;
2456 parent_team = team->t.t_parent;
2461 void *team_microtask = (void *)team->t.t_pkfn;
2466 !(team->t.t_serialized && fork_context == fork_context_gnu)) {
2473 KA_TRACE(20, ("__kmp_join_call: T#%d, old team = %p old task_team = %p, "
2475 __kmp_gtid_from_thread(master_th), team,
2476 team->t.t_task_team[master_th->th.th_task_state],
2478 KMP_DEBUG_ASSERT_TASKTEAM_INVARIANT(team, master_th);
2482 if (team->t.t_serialized) {
2485 int level = team->t.t_level;
2490 team->t.t_level++;
2495 team->t.t_serialized++;
2512 master_active = team->t.t_master_active;
2516 // But there is barrier for external team (league).
2517 __kmp_internal_join(loc, gtid, team);
2520 KMP_DEBUG_ASSERT(team->t.t_stack_id != NULL);
2522 __kmp_itt_stack_caller_destroy((__itt_caller)team->t.t_stack_id);
2523 team->t.t_stack_id = NULL;
2544 ompt_data_t *parallel_data = &(team->t.ompt_team_info.parallel_data);
2545 void *codeptr = team->t.ompt_team_info.master_return_address;
2550 if (team->t.t_active_level == 1 &&
2558 __kmp_itt_frame_submit(gtid, team->t.t_region_time,
2570 master_th->th.th_first_place = team->t.t_first_place;
2571 master_th->th.th_last_place = team->t.t_last_place;
2576 team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
2577 team->t.t_level == master_th->th.th_teams_level + 1) {
2578 // AC: We need to leave the team structure intact at the end of parallel
2579 // inside the teams construct, so that at the next parallel same (hot) team
2586 int ompt_team_size = team->t.t_nproc;
2598 team->t.t_level--;
2599 team->t.t_active_level--;
2602 // Restore number of threads in the team if needed. This code relies on
2609 kmp_info_t **other_threads = team->t.t_threads;
2610 team->t.t_nproc = new_num;
2614 // Adjust states of non-used threads of the team
2620 balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
2623 balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
2643 /* do cleanup and restore the parent team */
2644 master_th->th.th_info.ds.ds_tid = team->t.t_master_tid;
2645 master_th->th.th_local.this_construct = team->t.t_master_this_cons;
2647 master_th->th.th_dispatch = &parent_team->t.t_dispatch[team->t.t_master_tid];
2655 team->t.t_level > master_th->th.th_teams_level) {
2668 int ompt_team_size = (flags == ompt_task_initial) ? 0 : team->t.t_nproc;
2678 KF_TRACE(10, ("__kmp_join_call1: T#%d, this_thread=%p team=%p\n", 0,
2679 master_th, team));
2682 master_th->th.th_def_allocator = team->t.t_def_allocator;
2688 updateHWFPControl(team);
2693 __kmp_free_team(root, team USE_NESTED_HOT_ARG(
2697 region otherwise assertions may fail occasionally since the old team may be
2706 /* restore serialized team, if need be */
2716 // Restore primary thread's task state from team structure
2717 KMP_DEBUG_ASSERT(team->t.t_primary_task_state == 0 ||
2718 team->t.t_primary_task_state == 1);
2719 master_th->th.th_task_state = (kmp_uint8)team->t.t_primary_task_state;
2721 // Copy the task team from the parent team to the primary thread
2725 ("__kmp_join_call: Primary T#%d restoring task_team %p, team %p\n",
2758 serial team stack. If so, do it. */
2775 if (push) { /* push a record on the serial team's stack */
2812 // If this omp_set_num_threads() call will cause the hot team size to be
2834 // When decreasing team size, threads no longer in the team should unref
2835 // task team.
2947 // kmp_team_t *team;
3052 kmp_team_t *team;
3064 team = thr->th.th_team;
3065 ii = team->t.t_level;
3088 dd = team->t.t_serialized;
3091 for (dd = team->t.t_serialized; (dd > 0) && (ii > level); dd--, ii--) {
3093 if ((team->t.t_serialized) && (!dd)) {
3094 team = team->t.t_parent;
3098 team = team->t.t_parent;
3099 dd = team->t.t_serialized;
3104 return (dd > 1) ? (0) : (team->t.t_master_tid);
3110 kmp_team_t *team;
3122 team = thr->th.th_team;
3123 ii = team->t.t_level;
3144 for (dd = team->t.t_serialized; (dd > 0) && (ii > level); dd--, ii--) {
3146 if (team->t.t_serialized && (!dd)) {
3147 team = team->t.t_parent;
3151 team = team->t.t_parent;
3156 return team->t.t_nproc;
3194 at least argc number of *t_argv entries for the requested team. */
3195 static void __kmp_alloc_argv_entries(int argc, kmp_team_t *team, int realloc) {
3197 KMP_DEBUG_ASSERT(team);
3198 if (!realloc || argc > team->t.t_max_argc) {
3200 KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: needed entries=%d, "
3202 team->t.t_id, argc, (realloc) ? team->t.t_max_argc : 0));
3204 if (realloc && team->t.t_argv != &team->t.t_inline_argv[0])
3205 __kmp_free((void *)team->t.t_argv);
3209 team->t.t_max_argc = KMP_INLINE_ARGV_ENTRIES;
3210 KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: inline allocate %d "
3212 team->t.t_id, team->t.t_max_argc));
3213 team->t.t_argv = &team->t.t_inline_argv[0];
3216 -1, &team->t.t_inline_argv[0],
3217 &team->t.t_inline_argv[KMP_INLINE_ARGV_ENTRIES],
3219 team->t.t_id);
3223 team->t.t_max_argc = (argc <= (KMP_MIN_MALLOC_ARGV_ENTRIES >> 1))
3226 KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: dynamic allocate %d "
3228 team->t.t_id, team->t.t_max_argc));
3229 team->t.t_argv =
3230 (void **)__kmp_page_allocate(sizeof(void *) * team->t.t_max_argc);
3232 __kmp_print_storage_map_gtid(-1, &team->t.t_argv[0],
3233 &team->t.t_argv[team->t.t_max_argc],
3234 sizeof(void *) * team->t.t_max_argc,
3235 "team_%d.t_argv", team->t.t_id);
3241 static void __kmp_allocate_team_arrays(kmp_team_t *team, int max_nth) {
3244 team->t.t_threads =
3246 team->t.t_disp_buffer = (dispatch_shared_info_t *)__kmp_allocate(
3248 team->t.t_dispatch =
3250 team->t.t_implicit_task_taskdata =
3252 team->t.t_max_nproc = max_nth;
3256 team->t.t_disp_buffer[i].buffer_index = i;
3257 team->t.t_disp_buffer[i].doacross_buf_idx = i;
3261 static void __kmp_free_team_arrays(kmp_team_t *team) {
3264 for (i = 0; i < team->t.t_max_nproc; ++i) {
3265 if (team->t.t_dispatch[i].th_disp_buffer != NULL) {
3266 __kmp_free(team->t.t_dispatch[i].th_disp_buffer);
3267 team->t.t_dispatch[i].th_disp_buffer = NULL;
3271 __kmp_dispatch_free_hierarchies(team);
3273 __kmp_free(team->t.t_threads);
3274 __kmp_free(team->t.t_disp_buffer);
3275 __kmp_free(team->t.t_dispatch);
3276 __kmp_free(team->t.t_implicit_task_taskdata);
3277 team->t.t_threads = NULL;
3278 team->t.t_disp_buffer = NULL;
3279 team->t.t_dispatch = NULL;
3280 team->t.t_implicit_task_taskdata = 0;
3283 static void __kmp_reallocate_team_arrays(kmp_team_t *team, int max_nth) {
3284 kmp_info_t **oldThreads = team->t.t_threads;
3286 __kmp_free(team->t.t_disp_buffer);
3287 __kmp_free(team->t.t_dispatch);
3288 __kmp_free(team->t.t_implicit_task_taskdata);
3289 __kmp_allocate_team_arrays(team, max_nth);
3291 KMP_MEMCPY(team->t.t_threads, oldThreads,
3292 team->t.t_nproc * sizeof(kmp_info_t *));
3333 static kmp_internal_control_t __kmp_get_x_global_icvs(const kmp_team_t *team) {
3337 0; // probably =team->t.t_serial like in save_inter_controls
3338 copy_icvs(&gx_icvs, &team->t.t_threads[0]->th.th_current_task->td_icvs);
3365 /* setup the root team for this task */
3366 /* allocate the root team structure */
3382 // team.
3391 /* initialize root team */
3400 ("__kmp_initialize_root: init root team %d arrived: join=%u, plain=%u\n",
3403 /* setup the hot team for this task */
3404 /* allocate the hot team structure */
3426 /* initialize hot team */
3446 static void __kmp_print_structure_team_accum( // Add team to list of teams.
3448 kmp_team_p const *team // Team to add.
3453 // List is sorted in ascending order by team id.
3459 if (team == NULL) {
3463 __kmp_print_structure_team_accum(list, team->t.t_parent);
3464 __kmp_print_structure_team_accum(list, team->t.t_next_pool);
3466 // Search list for the team.
3468 while (l->next != NULL && l->entry != team) {
3477 while (l->next != NULL && l->entry->t.t_id <= team->t.t_id) {
3481 // Insert team.
3486 l->entry = team;
3491 static void __kmp_print_structure_team(char const *title, kmp_team_p const *team
3495 if (team != NULL) {
3496 __kmp_printf("%2x %p\n", team->t.t_id, team);
3596 kmp_team_p const *team = list->entry;
3598 __kmp_printf("Team %2x %p:\n", team->t.t_id, team);
3599 __kmp_print_structure_team(" Parent Team: ", team->t.t_parent);
3600 __kmp_printf(" Primary TID: %2d\n", team->t.t_master_tid);
3601 __kmp_printf(" Max threads: %2d\n", team->t.t_max_nproc);
3602 __kmp_printf(" Levels of serial: %2d\n", team->t.t_serialized);
3603 __kmp_printf(" Number threads: %2d\n", team->t.t_nproc);
3604 for (i = 0; i < team->t.t_nproc; ++i) {
3606 __kmp_print_structure_thread("", team->t.t_threads[i]);
3608 __kmp_print_structure_team(" Next in pool: ", team->t.t_next_pool);
3622 // Free team list.
3831 // If it is not for initializing the hidden helper team, we need to take
3952 /* setup the serial team held in reserve by the root thread */
3973 // AC: the team created in reserve, not for execution (it is unused for now).
4076 kmp_team_t *team = hot_teams[level].hot_team;
4081 kmp_info_t *th = team->t.t_threads[i];
4089 __kmp_free_team(root, team, NULL);
4212 kmp_team_t *team = thread->th.th_team;
4222 __kmp_task_team_wait(thread, team USE_ITT_BUILD_ARG(NULL));
4262 kmp_team_t *team = this_thr->th.th_team;
4265 "__kmp_task_info: gtid=%d tid=%d t_thread=%p team=%p steam=%p curtask=%p "
4267 gtid, tid, this_thr, team, steam, this_thr->th.th_current_task,
4268 team->t.t_implicit_task_taskdata[tid].td_parent);
4275 static void __kmp_initialize_info(kmp_info_t *this_thr, kmp_team_t *team,
4282 KMP_DEBUG_ASSERT(team);
4283 KMP_DEBUG_ASSERT(team->t.t_threads);
4284 KMP_DEBUG_ASSERT(team->t.t_dispatch);
4285 kmp_info_t *master = team->t.t_threads[0];
4291 TCW_SYNC_PTR(this_thr->th.th_team, team);
4308 /* setup the thread's cache of the team structure */
4309 this_thr->th.th_team_nproc = team->t.t_nproc;
4311 this_thr->th.th_team_serialized = team->t.t_serialized;
4313 KMP_DEBUG_ASSERT(team->t.t_implicit_task_taskdata);
4319 team, tid, TRUE);
4327 this_thr->th.th_dispatch = &team->t.t_dispatch[tid];
4372 // Use team max_nproc since this will never change for the team.
4375 (team->t.t_max_nproc == 1 ? 1 : __kmp_dispatch_num_buffers);
4377 team->t.t_max_nproc));
4379 KMP_DEBUG_ASSERT(team->t.t_dispatch);
4380 KMP_DEBUG_ASSERT(dispatch == &team->t.t_dispatch[tid]);
4391 &dispatch->th_disp_buffer[team->t.t_max_nproc == 1
4397 gtid, team->t.t_id, gtid);
4418 /* allocate a new thread for the requesting team. this is only called from
4423 kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
4430 KMP_DEBUG_ASSERT(root && team);
4437 * the main hidden helper thread. The hidden helper team should always
4439 if (__kmp_thread_pool && !KMP_HIDDEN_HELPER_TEAM(team)) {
4461 __kmp_initialize_info(new_thr, team, new_tid,
4472 // Thread activated in __kmp_allocate_team when increasing team size
4502 KMP_ASSERT(KMP_HIDDEN_HELPER_TEAM(team) || __kmp_nth == __kmp_all_nth);
4599 // add the reserve serialized team, initialized from the team's primary thread
4601 kmp_internal_control_t r_icvs = __kmp_get_x_global_icvs(team);
4612 serial_team->t.t_serialized = 0; // AC: the team created in reserve, not for
4620 __kmp_initialize_info(new_thr, team, new_tid, new_gtid);
4633 /* Initialize these only once when thread is grabbed for a team allocation */
4642 balign[b].bb.team = NULL;
4719 /* Reinitialize team for reuse.
4720 The hot team code calls this case at every fork barrier, so EPCC barrier
4721 tests are extremely sensitive to changes in it, esp. writes to the team
4724 static void __kmp_reinitialize_team(kmp_team_t *team,
4727 KF_TRACE(10, ("__kmp_reinitialize_team: enter this_thread=%p team=%p\n",
4728 team->t.t_threads[0], team));
4729 KMP_DEBUG_ASSERT(team && new_icvs);
4731 KMP_CHECK_UPDATE(team->t.t_ident, loc);
4733 KMP_CHECK_UPDATE(team->t.t_id, KMP_GEN_TEAM_ID());
4735 __kmp_init_implicit_task(loc, team->t.t_threads[0], team, 0, FALSE);
4736 copy_icvs(&team->t.t_implicit_task_taskdata[0].td_icvs, new_icvs);
4738 KF_TRACE(10, ("__kmp_reinitialize_team: exit this_thread=%p team=%p\n",
4739 team->t.t_threads[0], team));
4742 /* Initialize the team data structure.
4745 static void __kmp_initialize_team(kmp_team_t *team, int new_nproc,
4748 KF_TRACE(10, ("__kmp_initialize_team: enter: team=%p\n", team));
4751 KMP_DEBUG_ASSERT(team);
4752 KMP_DEBUG_ASSERT(new_nproc <= team->t.t_max_nproc);
4753 KMP_DEBUG_ASSERT(team->t.t_threads);
4756 team->t.t_master_tid = 0; /* not needed */
4757 /* team->t.t_master_bar; not needed */
4758 team->t.t_serialized = new_nproc > 1 ? 0 : 1;
4759 team->t.t_nproc = new_nproc;
4761 /* team->t.t_parent = NULL; TODO not needed & would mess up hot team */
4762 team->t.t_next_pool = NULL;
4763 /* memset( team->t.t_threads, 0, sizeof(kmp_info_t*)*new_nproc ); would mess
4764 * up hot team */
4766 TCW_SYNC_PTR(team->t.t_pkfn, NULL); /* not needed */
4767 team->t.t_invoke = NULL; /* not needed */
4769 // TODO???: team->t.t_max_active_levels = new_max_active_levels;
4770 team->t.t_sched.sched = new_icvs->sched.sched;
4773 team->t.t_fp_control_saved = FALSE; /* not needed */
4774 team->t.t_x87_fpu_control_word = 0; /* not needed */
4775 team->t.t_mxcsr = 0; /* not needed */
4778 team->t.t_construct = 0;
4780 team->t.t_ordered.dt.t_value = 0;
4781 team->t.t_master_active = FALSE;
4784 team->t.t_copypriv_data = NULL; /* not necessary, but nice for debugging */
4787 team->t.t_copyin_counter = 0; /* for barrier-free copyin implementation */
4790 team->t.t_control_stack_top = NULL;
4792 __kmp_reinitialize_team(team, new_icvs, loc);
4795 KF_TRACE(10, ("__kmp_initialize_team: exit: team=%p\n", team));
4799 static inline void __kmp_set_thread_place(kmp_team_t *team, kmp_info_t *th,
4805 if (__kmp_display_affinity && team->t.t_display_affinity != 1)
4806 team->t.t_display_affinity = 1;
4817 static void __kmp_partition_places(kmp_team_t *team, int update_master_only) {
4818 // Do not partition places for the hidden helper team
4819 if (KMP_HIDDEN_HELPER_TEAM(team))
4821 // Copy the primary thread's place partition to the team struct
4822 kmp_info_t *master_th = team->t.t_threads[0];
4824 kmp_proc_bind_t proc_bind = team->t.t_proc_bind;
4829 team->t.t_first_place = first_place;
4830 team->t.t_last_place = last_place;
4834 proc_bind, __kmp_gtid_from_thread(team->t.t_threads[0]),
4835 team->t.t_id, masters_place, first_place, last_place));
4842 KMP_DEBUG_ASSERT(team->t.t_nproc == 1);
4847 int n_th = team->t.t_nproc;
4849 kmp_info_t *th = team->t.t_threads[f];
4851 __kmp_set_thread_place(team, th, first_place, last_place, masters_place);
4855 __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id,
4862 int n_th = team->t.t_nproc;
4872 kmp_info_t *th = team->t.t_threads[f];
4882 __kmp_set_thread_place(team, th, first_place, last_place, place);
4886 __kmp_gtid_from_thread(team->t.t_threads[f]),
4887 team->t.t_id, f, place, first_place, last_place));
4898 kmp_info_t *th = team->t.t_threads[f];
4901 __kmp_set_thread_place(team, th, first_place, last_place, place);
4933 __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id, f,
4942 int n_th = team->t.t_nproc;
4965 kmp_info_t *th = team->t.t_threads[f];
4991 __kmp_set_thread_place(team, th, fplace, place, nplace);
5005 __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id,
5055 th = team->t.t_threads[f];
5057 __kmp_set_thread_place(team, th, first, last, place);
5061 __kmp_gtid_from_thread(team->t.t_threads[f]),
5062 team->t.t_id, f, th->th.th_new_place,
5080 kmp_info_t *th = team->t.t_threads[f];
5083 __kmp_set_thread_place(team, th, place, place, place);
5114 __kmp_gtid_from_thread(team->t.t_threads[f]),
5115 team->t.t_id, f, th->th.th_new_place,
5126 KA_TRACE(20, ("__kmp_partition_places: exit T#%d\n", team->t.t_id));
5131 /* allocate a new team data structure to use. take one off of the free pool if
5143 kmp_team_t *team;
5156 team = master->th.th_team;
5157 level = team->t.t_active_level;
5161 team->t.t_pkfn ==
5164 team->t.t_level)) { // or nested parallel inside the teams
5171 master->th.th_teams_level >= team->t.t_level) ||
5172 (team->t.t_pkfn == (microtask_t)__kmp_teams_master))
5178 // hot team has already been allocated for given level
5188 // Optimization to use a "hot" team
5192 team = hot_teams[level].hot_team;
5194 team = root->r.r_hot_team;
5198 KA_TRACE(20, ("__kmp_allocate_team: hot team task_team[0] = %p "
5200 team->t.t_task_team[0], team->t.t_task_team[1]));
5204 if (team->t.t_nproc != new_nproc &&
5207 int old_nthr = team->t.t_nproc;
5208 __kmp_resize_dist_barrier(team, old_nthr, new_nproc);
5211 // If not doing the place partition, then reset the team's proc bind
5214 team->t.t_proc_bind = proc_bind_default;
5218 if (team->t.t_nproc == new_nproc) { // Check changes in number of threads
5219 KA_TRACE(20, ("__kmp_allocate_team: reusing hot team\n"));
5221 // team size was already reduced, so we check the special flag
5222 if (team->t.t_size_changed == -1) {
5223 team->t.t_size_changed = 1;
5225 KMP_CHECK_UPDATE(team->t.t_size_changed, 0);
5228 // TODO???: team->t.t_max_active_levels = new_max_active_levels;
5231 KMP_CHECK_UPDATE(team->t.t_sched.sched, new_sched.sched);
5233 __kmp_reinitialize_team(team, new_icvs,
5236 KF_TRACE(10, ("__kmp_allocate_team2: T#%d, this_thread=%p team=%p\n", 0,
5237 team->t.t_threads[0], team));
5238 __kmp_push_current_task_to_thread(team->t.t_threads[0], team, 0);
5241 if ((team->t.t_size_changed == 0) &&
5242 (team->t.t_proc_bind == new_proc_bind)) {
5246 __kmp_partition_places(team, 1);
5249 KA_TRACE(200, ("__kmp_allocate_team: reusing hot team #%d bindings: "
5251 team->t.t_id, new_proc_bind, team->t.t_first_place,
5252 team->t.t_last_place));
5255 KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
5256 __kmp_partition_places(team);
5260 KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
5262 } else if (team->t.t_nproc > new_nproc) {
5264 ("__kmp_allocate_team: decreasing hot team thread count to %d\n",
5267 team->t.t_size_changed = 1;
5270 // Activate team threads via th_used_in_team
5271 __kmp_add_threads_to_team(team, new_nproc);
5273 // When decreasing team size, threads no longer in the team should
5274 // unref task team.
5276 for (f = new_nproc; f < team->t.t_nproc; f++) {
5277 kmp_info_t *th = team->t.t_threads[f];
5284 // AC: saved number of threads should correspond to team's value in this
5285 // mode, can be bigger in mode 1, when hot team has threads in reserve
5286 KMP_DEBUG_ASSERT(hot_teams[level].hot_team_nth == team->t.t_nproc);
5290 for (f = new_nproc; f < team->t.t_nproc; f++) {
5291 KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5292 __kmp_free_thread(team->t.t_threads[f]);
5293 team->t.t_threads[f] = NULL;
5298 // When keeping extra threads in team, switch threads to wait on own
5300 for (f = new_nproc; f < team->t.t_nproc; ++f) {
5301 KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5302 kmp_balign_t *balign = team->t.t_threads[f]->th.th_bar;
5312 team->t.t_nproc = new_nproc;
5313 // TODO???: team->t.t_max_active_levels = new_max_active_levels;
5314 KMP_CHECK_UPDATE(team->t.t_sched.sched, new_icvs->sched.sched);
5315 __kmp_reinitialize_team(team, new_icvs,
5320 team->t.t_threads[f]->th.th_team_nproc = new_nproc;
5325 KF_TRACE(10, ("__kmp_allocate_team: T#%d, this_thread=%p team=%p\n", 0,
5326 team->t.t_threads[0], team));
5328 __kmp_push_current_task_to_thread(team->t.t_threads[0], team, 0);
5331 for (f = 0; f < team->t.t_nproc; f++) {
5332 KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
5333 team->t.t_threads[f]->th.th_team_nproc ==
5334 team->t.t_nproc);
5339 KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
5341 __kmp_partition_places(team);
5344 } else { // team->t.t_nproc < new_nproc
5347 ("__kmp_allocate_team: increasing hot team thread count to %d\n",
5349 int old_nproc = team->t.t_nproc; // save old value and use to update only
5350 team->t.t_size_changed = 1;
5356 kmp_info_t **other_threads = team->t.t_threads;
5357 for (f = team->t.t_nproc; f < avail_threads; ++f) {
5358 // Adjust barrier data of reserved threads (if any) of the team
5363 balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
5366 balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
5374 team->t.t_nproc = new_nproc; // just get reserved threads involved
5378 team->t.t_nproc = hot_teams[level].hot_team_nth;
5379 hot_teams[level].hot_team_nth = new_nproc; // adjust hot team max size
5381 if (team->t.t_max_nproc < new_nproc) {
5383 __kmp_reallocate_team_arrays(team, new_nproc);
5384 __kmp_reinitialize_team(team, new_icvs, NULL);
5397 /* allocate new threads for the hot team */
5398 for (f = team->t.t_nproc; f < new_nproc; f++) {
5399 kmp_info_t *new_worker = __kmp_allocate_thread(root, team, f);
5401 team->t.t_threads[f] = new_worker;
5404 ("__kmp_allocate_team: team %d init T#%d arrived: "
5406 team->t.t_id, __kmp_gtid_from_tid(f, team), team->t.t_id, f,
5407 team->t.t_bar[bs_forkjoin_barrier].b_arrived,
5408 team->t.t_bar[bs_plain_barrier].b_arrived));
5414 balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
5418 balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
5434 // Activate team threads via th_used_in_team
5435 __kmp_add_threads_to_team(team, new_nproc);
5439 __kmp_initialize_team(team, new_nproc, new_icvs,
5443 KMP_DEBUG_ASSERT(team->t.t_nproc == new_nproc);
5444 for (f = 0; f < team->t.t_nproc; ++f)
5445 __kmp_initialize_info(team->t.t_threads[f], team, f,
5446 __kmp_gtid_from_tid(f, team));
5448 // set th_task_state for new threads in hot team with older thread's state
5449 kmp_uint8 old_state = team->t.t_threads[old_nproc - 1]->th.th_task_state;
5450 for (f = old_nproc; f < team->t.t_nproc; ++f)
5451 team->t.t_threads[f]->th.th_task_state = old_state;
5454 for (f = 0; f < team->t.t_nproc; ++f) {
5455 KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
5456 team->t.t_threads[f]->th.th_team_nproc ==
5457 team->t.t_nproc);
5462 KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
5464 __kmp_partition_places(team);
5472 kmp_info_t *thr = team->t.t_threads[f];
5481 // team.
5483 kmp_info_t *thr = team->t.t_threads[f];
5487 balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
5490 balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
5498 __kmp_alloc_argv_entries(argc, team, TRUE);
5499 KMP_CHECK_UPDATE(team->t.t_argc, argc);
5500 // The hot team re-uses the previous task team,
5503 KF_TRACE(10, (" hot_team = %p\n", team));
5507 KA_TRACE(20, ("__kmp_allocate_team: hot team task_team[0] = %p "
5509 team->t.t_task_team[0], team->t.t_task_team[1]));
5514 __ompt_team_assign_id(team, ompt_parallel_data);
5519 return team;
5522 /* next, let's try to take one from the team pool */
5524 for (team = CCAST(kmp_team_t *, __kmp_team_pool); (team);) {
5527 if (team->t.t_max_nproc >= max_nproc) {
5528 /* take this team from the team pool */
5529 __kmp_team_pool = team->t.t_next_pool;
5533 if (!team->t.b) { // Allocate barrier structure
5534 team->t.b = distributedBarrier::allocate(__kmp_dflt_team_nth_ub);
5538 /* setup the team for fresh use */
5539 __kmp_initialize_team(team, new_nproc, new_icvs, NULL);
5543 &team->t.t_task_team[0], &team->t.t_task_team[1]));
5544 team->t.t_task_team[0] = NULL;
5545 team->t.t_task_team[1] = NULL;
5548 __kmp_alloc_argv_entries(argc, team, TRUE);
5549 KMP_CHECK_UPDATE(team->t.t_argc, argc);
5552 20, ("__kmp_allocate_team: team %d init arrived: join=%u, plain=%u\n",
5553 team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
5557 team->t.t_bar[b].b_arrived = KMP_INIT_BARRIER_STATE;
5559 team->t.t_bar[b].b_master_arrived = 0;
5560 team->t.t_bar[b].b_team_arrived = 0;
5565 team->t.t_proc_bind = new_proc_bind;
5567 KA_TRACE(20, ("__kmp_allocate_team: using team from pool %d.\n",
5568 team->t.t_id));
5571 __ompt_team_assign_id(team, ompt_parallel_data);
5574 team->t.t_nested_nth = NULL;
5578 return team;
5581 /* reap team if it is too small, then loop back and check the next one */
5584 /* TODO: Use technique to find the right size hot-team, don't reap them */
5585 team = __kmp_reap_team(team);
5586 __kmp_team_pool = team;
5589 /* nothing available in the pool, no matter, make a new team! */
5591 team = (kmp_team_t *)__kmp_allocate(sizeof(kmp_team_t));
5594 team->t.t_max_nproc = max_nproc;
5598 team->t.b = distributedBarrier::allocate(__kmp_dflt_team_nth_ub);
5603 __kmp_allocate_team_arrays(team, max_nproc);
5605 KA_TRACE(20, ("__kmp_allocate_team: making a new team\n"));
5606 __kmp_initialize_team(team, new_nproc, new_icvs, NULL);
5610 &team->t.t_task_team[0], &team->t.t_task_team[1]));
5611 team->t.t_task_team[0] = NULL; // to be removed, as __kmp_allocate zeroes
5613 team->t.t_task_team[1] = NULL; // to be removed, as __kmp_allocate zeroes
5617 __kmp_print_team_storage_map("team", team, team->t.t_id, new_nproc);
5621 __kmp_alloc_argv_entries(argc, team, FALSE);
5622 team->t.t_argc = argc;
5625 ("__kmp_allocate_team: team %d init arrived: join=%u, plain=%u\n",
5626 team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
5630 team->t.t_bar[b].b_arrived = KMP_INIT_BARRIER_STATE;
5632 team->t.t_bar[b].b_master_arrived = 0;
5633 team->t.t_bar[b].b_team_arrived = 0;
5638 team->t.t_proc_bind = new_proc_bind;
5641 __ompt_team_assign_id(team, ompt_parallel_data);
5642 team->t.ompt_serialized_team_info = NULL;
5647 team->t.t_nested_nth = NULL;
5649 KA_TRACE(20, ("__kmp_allocate_team: done creating a new team %d.\n",
5650 team->t.t_id));
5652 return team;
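When no hot team applies, __kmp_allocate_team (source lines ~5131-5652) falls back to a simple free pool: walk the pool, take the first cached team whose t_max_nproc is large enough, reap teams that are too small, and build a fresh team only if the pool runs dry; __kmp_free_team later pushes the team back onto the pool. A compact sketch of that recycle-or-reap loop over a singly linked pool; all types and names here are illustrative.

    #include <cstdio>

    struct team {            // toy stand-in for kmp_team_t
      int max_nproc = 0;     // capacity the cached arrays were sized for
      team *next_pool = nullptr;
    };

    static team *team_pool = nullptr; // head of the free list (cf. __kmp_team_pool)

    static team *reap_team(team *t) { // destroy a cached team, return its successor
      team *next = t->next_pool;
      delete t;
      return next;
    }

    static team *allocate_team(int max_nproc) {
      while (team_pool) {
        team *t = team_pool;
        if (t->max_nproc >= max_nproc) { // big enough: take it out of the pool
          team_pool = t->next_pool;
          t->next_pool = nullptr;
          return t;
        }
        team_pool = reap_team(t); // too small: reap it and look at the next one
      }
      team *t = new team;         // nothing reusable, build a new team
      t->max_nproc = max_nproc;
      return t;
    }

    static void free_team(team *t) { // return a team to the pool for reuse
      t->next_pool = team_pool;
      team_pool = t;
    }

    int main() {
      team *a = allocate_team(4);
      free_team(a);
      team *b = allocate_team(8); // the cached 4-slot team is reaped, a new one made
      std::printf("got team with capacity %d\n", b->max_nproc);
      free_team(b);
      while (team_pool)
        team_pool = reap_team(team_pool);
    }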
5658 /* free the team. return it to the team pool. release all the threads
5661 kmp_team_t *team USE_NESTED_HOT_ARG(kmp_info_t *master)) {
5663 KA_TRACE(20, ("__kmp_free_team: T#%d freeing team %d\n", __kmp_get_gtid(),
5664 team->t.t_id));
5668 KMP_DEBUG_ASSERT(team);
5669 KMP_DEBUG_ASSERT(team->t.t_nproc <= team->t.t_max_nproc);
5670 KMP_DEBUG_ASSERT(team->t.t_threads);
5672 int use_hot_team = team == root->r.r_hot_team;
5676 level = team->t.t_active_level - 1;
5682 if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
5683 master->th.th_teams_level == team->t.t_level) {
5686 } // team->t.t_level will be increased inside parallel
5692 KMP_DEBUG_ASSERT(team == hot_teams[level].hot_team);
5698 /* team is done working */
5699 TCW_SYNC_PTR(team->t.t_pkfn,
5702 team->t.t_copyin_counter = 0; // init counter for possible reuse
5704 // Do not reset pointer to parent team to NULL for hot teams.
5706 /* if we are non-hot team, release our threads */
5710 for (f = 1; f < team->t.t_nproc; ++f) {
5711 KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5712 kmp_info_t *th = team->t.t_threads[f];
5733 kmp_task_team_t *task_team = team->t.t_task_team[tt_idx];
5735 for (f = 0; f < team->t.t_nproc; ++f) { // threads unref task teams
5736 KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5737 team->t.t_threads[f]->th.th_task_team = NULL;
5741 ("__kmp_free_team: T#%d deactivating task_team %p on team %d\n",
5742 __kmp_get_gtid(), task_team, team->t.t_id));
5746 team->t.t_task_team[tt_idx] = NULL;
5752 if (team->t.t_nested_nth && team->t.t_nested_nth != &__kmp_nested_nth &&
5753 team->t.t_nested_nth != team->t.t_parent->t.t_nested_nth) {
5754 KMP_INTERNAL_FREE(team->t.t_nested_nth->nth);
5755 KMP_INTERNAL_FREE(team->t.t_nested_nth);
5757 team->t.t_nested_nth = NULL;
5759 // Reset pointer to parent team only for non-hot teams.
5760 team->t.t_parent = NULL;
5761 team->t.t_level = 0;
5762 team->t.t_active_level = 0;
5765 for (f = 1; f < team->t.t_nproc; ++f) {
5766 KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5769 &(team->t.t_threads[f]->th.th_used_in_team), 1, 2);
5771 __kmp_free_thread(team->t.t_threads[f]);
5775 if (team->t.b) {
5777 team->t.b->go_release();
5779 for (f = 1; f < team->t.t_nproc; ++f) {
5780 if (team->t.b->sleep[f].sleep) {
5782 team->t.t_threads[f]->th.th_info.ds.ds_gtid,
5787 // Wait for threads to be removed from team
5788 for (int f = 1; f < team->t.t_nproc; ++f) {
5789 while (team->t.t_threads[f]->th.th_used_in_team.load() != 0)
5795 for (f = 1; f < team->t.t_nproc; ++f) {
5796 team->t.t_threads[f] = NULL;
5799 if (team->t.t_max_nproc > 1 &&
5801 distributedBarrier::deallocate(team->t.b);
5802 team->t.b = NULL;
5804 /* put the team back in the team pool */
5805 /* TODO limit size of team pool, call reap_team if pool too large */
5806 team->t.t_next_pool = CCAST(kmp_team_t *, __kmp_team_pool);
5807 __kmp_team_pool = (volatile kmp_team_t *)team;
5808 } else { // Check if team was created for primary threads in teams construct
5810 KMP_DEBUG_ASSERT(team->t.t_threads[1] &&
5811 team->t.t_threads[1]->th.th_cg_roots);
5812 if (team->t.t_threads[1]->th.th_cg_roots->cg_root == team->t.t_threads[1]) {
5813 // Clean up the CG root nodes on workers so that this team can be re-used
5814 for (f = 1; f < team->t.t_nproc; ++f) {
5815 kmp_info_t *thr = team->t.t_threads[f];
5839 /* reap the team. destroy it, reclaim all its resources and free its memory */
5840 kmp_team_t *__kmp_reap_team(kmp_team_t *team) {
5841 kmp_team_t *next_pool = team->t.t_next_pool;
5843 KMP_DEBUG_ASSERT(team);
5844 KMP_DEBUG_ASSERT(team->t.t_dispatch);
5845 KMP_DEBUG_ASSERT(team->t.t_disp_buffer);
5846 KMP_DEBUG_ASSERT(team->t.t_threads);
5847 KMP_DEBUG_ASSERT(team->t.t_argv);
5852 __kmp_free_team_arrays(team);
5853 if (team->t.t_argv != &team->t.t_inline_argv[0])
5854 __kmp_free((void *)team->t.t_argv);
5855 __kmp_free(team);
5878 // applications. Previously, as threads were freed from the hot team, they
5879 // would be placed back on the free list in inverse order. If the hot team
5881 // back on the hot team in reverse order. This could cause bad cache
5882 // locality problems on programs where the size of the hot team regularly
5896 // uninitialized (NULL team).
5902 balign[b].bb.team = NULL;
5939 * with higher probability when hot team is disabled but can occur even when
5940 * the hot team is enabled */
6057 /* No tid yet since not part of a team */
6400 // Get the next team from the pool.
6401 kmp_team_t *team = CCAST(kmp_team_t *, __kmp_team_pool);
6402 __kmp_team_pool = team->t.t_next_pool;
6404 team->t.t_next_pool = NULL;
6405 __kmp_reap_team(team);
6474 // If hidden helper team has been initialized, we need to deinit it
6480 // Wait until the hidden helper team has been destroyed
6597 // If hidden helper team has been initialized, we need to deinit it
6603 // Wait until the hidden helper team has been destroyed
7630 // team/threads
7648 kmp_team_t *team) {
7660 KMP_DEBUG_ASSERT(team->t.t_dispatch);
7661 // KMP_DEBUG_ASSERT( this_thr->th.th_dispatch == &team->t.t_dispatch[
7667 __kmp_push_parallel(gtid, team->t.t_ident);
7673 kmp_team_t *team) {
7675 __kmp_pop_parallel(gtid, team->t.t_ident);
7684 kmp_team_t *team = this_thr->th.th_team;
7686 __kmp_run_before_invoked_task(gtid, tid, this_thr, team);
7690 if (team->t.t_stack_id != NULL) {
7691 __kmp_itt_stack_callee_enter((__itt_caller)team->t.t_stack_id);
7693 KMP_DEBUG_ASSERT(team->t.t_parent->t.t_stack_id != NULL);
7695 (__itt_caller)team->t.t_parent->t.t_stack_id);
7711 exit_frame_p = &(team->t.t_implicit_task_taskdata[tid]
7718 &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data);
7719 my_parallel_data = &(team->t.ompt_team_info.parallel_data);
7721 ompt_team_size = team->t.t_nproc;
7739 rc = __kmp_invoke_microtask((microtask_t)TCR_SYNC_PTR(team->t.t_pkfn), gtid,
7740 tid, (int)team->t.t_argc, (void **)team->t.t_argv
7761 if (team->t.t_stack_id != NULL) {
7762 __kmp_itt_stack_callee_leave((__itt_caller)team->t.t_stack_id);
7764 KMP_DEBUG_ASSERT(team->t.t_parent->t.t_stack_id != NULL);
7766 (__itt_caller)team->t.t_parent->t.t_stack_id);
7770 __kmp_run_after_invoked_task(gtid, tid, this_thr, team);
7778 kmp_team_t *team = thr->th.th_team;
7779 ident_t *loc = team->t.t_ident;
7803 __kmp_fork_call(loc, gtid, fork_context_intel, team->t.t_argc,
7809 // If the team size was reduced from the limit, set it to the new size
7825 kmp_team_t *team = this_thr->th.th_team;
7831 __kmp_run_before_invoked_task(gtid, 0, this_thr, team);
7835 &team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data;
7836 ompt_data_t *parallel_data = &team->t.ompt_team_info.parallel_data;
7839 ompt_scope_begin, parallel_data, task_data, team->t.t_nproc, tid,
7848 __kmp_run_after_invoked_task(gtid, 0, this_thr, team);
7853 encountered by this team. since this should be enclosed in the forkjoin
7892 thr->th.th_nt_msg = "Cannot form team with number of threads specified by "
7920 } // prevent team size from exceeding thread-limit-var
8059 void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team) {
8066 KMP_DEBUG_ASSERT(team);
8067 KMP_DEBUG_ASSERT(this_thr->th.th_team == team);
8071 team->t.t_construct = 0; /* no single directives seen yet */
8072 team->t.t_ordered.dt.t_value =
8076 KMP_DEBUG_ASSERT(team->t.t_disp_buffer);
8077 if (team->t.t_max_nproc > 1) {
8080 team->t.t_disp_buffer[i].buffer_index = i;
8081 team->t.t_disp_buffer[i].doacross_buf_idx = i;
8084 team->t.t_disp_buffer[0].buffer_index = 0;
8085 team->t.t_disp_buffer[0].doacross_buf_idx = 0;
8089 KMP_ASSERT(this_thr->th.th_team == team);
8092 for (f = 0; f < team->t.t_nproc; f++) {
8093 KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
8094 team->t.t_threads[f]->th.th_team_nproc == team->t.t_nproc);
8102 void __kmp_internal_join(ident_t *id, int gtid, kmp_team_t *team) {
8105 KMP_DEBUG_ASSERT(team);
8106 KMP_DEBUG_ASSERT(this_thr->th.th_team == team);
8114 __kmp_threads[gtid]->th.th_team_nproc != team->t.t_nproc) {
8118 "team->t.t_nproc=%d\n",
8119 gtid, __kmp_threads[gtid]->th.th_team_nproc, team,
8120 team->t.t_nproc);
8124 __kmp_threads[gtid]->th.th_team_nproc == team->t.t_nproc);
8164 KMP_ASSERT(this_thr->th.th_team == team);
8171 // Return the worker threads actively spinning in the hot team, if we
8217 // Threads that are active in the thread pool, active in the hot team for this
8220 // new team, but are currently contributing to the system load, and must be
8229 "hot team active = %d\n",
8256 // large as the #active omp thread that are available to add to the team.
8521 /* Getting team information common for all team API */
8527 kmp_team_t *team = thr->th.th_team;
8529 int ii = team->t.t_level;
8530 teams_serialized = team->t.t_serialized;
8534 for (teams_serialized = team->t.t_serialized;
8537 if (team->t.t_serialized && (!teams_serialized)) {
8538 team = team->t.t_parent;
8542 team = team->t.t_parent;
8546 return team;
8553 kmp_team_t *team = __kmp_aux_get_team_info(serialized);
8554 if (team) {
8556 return 0; // teams region is serialized ( 1 team of 1 thread ).
8558 return team->t.t_master_tid;
8566 kmp_team_t *team = __kmp_aux_get_team_info(serialized);
8567 if (team) {
8571 return team->t.t_parent->t.t_nproc;
8939 // another choice of getting a team size (with 1 dynamic dereference) is slower
9020 // If the team is serialized (team_size == 1), ignore the forced reduction
9158 // The team size is changing, so distributed barrier must be modified
9159 void __kmp_resize_dist_barrier(kmp_team_t *team, int old_nthreads,
9163 kmp_info_t **other_threads = team->t.t_threads;
9166 // size of the team.
9169 // Ignore threads that are already inactive or not present in the team
9170 if (team->t.t_threads[f]->th.th_used_in_team.load() == 0) {
9176 if (team->t.t_threads[f]->th.th_used_in_team.load() == 3) {
9177 while (team->t.t_threads[f]->th.th_used_in_team.load() == 3)
9181 KMP_DEBUG_ASSERT(team->t.t_threads[f]->th.th_used_in_team.load() == 1);
9183 team->t.t_threads[f]->th.th_used_in_team.store(2);
9184 KMP_DEBUG_ASSERT(team->t.t_threads[f]->th.th_used_in_team.load() == 2);
9187 team->t.b->go_release();
9204 KMP_DEBUG_ASSERT(team->t.t_threads[f]->th.th_used_in_team.load() == 0);
9210 team->t.b->update_num_threads(new_nthreads);
9211 team->t.b->go_reset();
9214 void __kmp_add_threads_to_team(kmp_team_t *team, int new_nthreads) {
9215 // Add the threads back to the team
9216 KMP_DEBUG_ASSERT(team);
9218 // resize of the team. We're going to set th_used_in_team to 3 to indicate to
9219 // the thread that it should transition itself back into the team. Then, if
9223 KMP_DEBUG_ASSERT(team->t.t_threads[f]);
9225 &(team->t.t_threads[f]->th.th_used_in_team), 0, 3);
9227 __kmp_resume_32(team->t.t_threads[f]->th.th_info.ds.ds_gtid,
9231 // The threads should be transitioning to the team; when they are done, they
9233 // all threads have moved into the team and are waiting in the barrier.
9238 if (team->t.t_threads[f]->th.th_used_in_team.load() == 1) {
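__kmp_resize_dist_barrier and __kmp_add_threads_to_team (source lines ~9158-9238) coordinate through each thread's th_used_in_team flag: roughly 1 means in the team, 2 means asked to leave, 3 means asked to rejoin, and 0 means fully out; the primary thread flips the flag with compare-and-swap and waits for the worker to acknowledge. A self-contained sketch of that handshake with std::atomic follows; the encoding mirrors the comments above but is illustrative, not the runtime itself.

    #include <atomic>
    #include <cstdio>
    #include <thread>

    // Per-thread flag, cf. th_used_in_team: 0 = out, 1 = in team,
    // 2 = asked to leave, 3 = asked to rejoin.
    struct worker_slot {
      std::atomic<int> used_in_team{1};
    };

    // Worker side: acknowledge leave (2 -> 0) or rejoin (3 -> 1) requests.
    static void worker_ack(worker_slot &slot, std::atomic<bool> &stop) {
      while (!stop.load(std::memory_order_acquire)) {
        int expected = 2;
        slot.used_in_team.compare_exchange_strong(expected, 0);
        expected = 3;
        slot.used_in_team.compare_exchange_strong(expected, 1);
        std::this_thread::yield();
      }
    }

    // Primary side: ask a worker to leave the team and wait until it is out.
    static void remove_from_team(worker_slot &slot) {
      int expected = 1;
      slot.used_in_team.compare_exchange_strong(expected, 2);
      while (slot.used_in_team.load(std::memory_order_acquire) != 0)
        std::this_thread::yield();
    }

    // Primary side: ask a worker to rejoin and wait until it is back in.
    static void add_to_team(worker_slot &slot) {
      int expected = 0;
      slot.used_in_team.compare_exchange_strong(expected, 3);
      while (slot.used_in_team.load(std::memory_order_acquire) != 1)
        std::this_thread::yield();
    }

    int main() {
      worker_slot slot;
      std::atomic<bool> stop{false};
      std::thread w(worker_ack, std::ref(slot), std::ref(stop));
      remove_from_team(slot); // shrink: 1 -> 2 -> 0
      add_to_team(slot);      // grow back: 0 -> 3 -> 1
      stop.store(true, std::memory_order_release);
      w.join();
      std::printf("final state: %d\n", slot.used_in_team.load());
    }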
9264 // by the main thread after creating the team.
9285 // Create a new root for hidden helper team/threads