Lines Matching defs:task_team
33 static void __kmp_enable_tasking(kmp_task_team_t *task_team,
38 kmp_task_team_t *task_team);
356 // This function is called with task_team->tt.tt_task_pri_lock held.
358 __kmp_get_priority_deque_data(kmp_task_team_t *task_team, kmp_int32 pri) {
360 kmp_task_pri_t *lst = task_team->tt.tt_task_pri_list;
371 task_team->tt.tt_task_pri_list = list;
372 } else { // task_team->tt.tt_task_pri_list->priority > pri
404 kmp_task_team_t *task_team,
412 kmp_task_pri_t *lst = task_team->tt.tt_task_pri_list;
414 __kmp_acquire_bootstrap_lock(&task_team->tt.tt_task_pri_lock);
415 if (task_team->tt.tt_task_pri_list == NULL) {
421 task_team->tt.tt_task_pri_list = list;
424 thread_data = __kmp_get_priority_deque_data(task_team, pri);
426 __kmp_release_bootstrap_lock(&task_team->tt.tt_task_pri_lock);
432 __kmp_acquire_bootstrap_lock(&task_team->tt.tt_task_pri_lock);
433 thread_data = __kmp_get_priority_deque_data(task_team, pri);
434 __kmp_release_bootstrap_lock(&task_team->tt.tt_task_pri_lock);
472 task_team->tt.tt_num_task_pri++; // atomic inc
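
The fragments above, from __kmp_get_priority_deque_data and __kmp_push_priority_task, show a double-checked pattern: probe tt_task_pri_list without the lock, and fall back to searching or inserting under tt_task_pri_lock only when the fast path misses. A minimal standard-C++ sketch of that shape follows; PriNode, PriList, and find_or_insert_locked are illustrative stand-ins, not the real kmp_task_pri_t machinery:

    #include <atomic>
    #include <mutex>

    struct PriNode {
      int priority;
      PriNode *next;
    };

    struct PriList {
      std::atomic<PriNode *> head{nullptr};
      std::mutex lock; // stands in for tt.tt_task_pri_lock

      // Precondition: caller holds `lock` (mirroring the comment on
      // __kmp_get_priority_deque_data). The list is kept sorted by
      // descending priority; a node is inserted if `pri` is missing.
      PriNode *find_or_insert_locked(int pri) {
        PriNode *prev = nullptr;
        PriNode *cur = head.load(std::memory_order_relaxed);
        while (cur != nullptr && cur->priority > pri) {
          prev = cur;
          cur = cur->next;
        }
        if (cur != nullptr && cur->priority == pri)
          return cur; // already present
        PriNode *node = new PriNode{pri, cur};
        if (prev != nullptr)
          prev->next = node;
        else
          head.store(node, std::memory_order_release);
        return node;
      }

      PriNode *get(int pri) {
        // Fast path: the head often already matches the priority.
        PriNode *n = head.load(std::memory_order_acquire);
        if (n != nullptr && n->priority == pri)
          return n;
        std::lock_guard<std::mutex> guard(lock);
        return find_or_insert_locked(pri);
      }
    };
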
493 kmp_task_team_t *task_team = thread->th.th_task_team;
511 // The first check avoids building task_team thread data if serialized
522 if (UNLIKELY(!KMP_TASKING_ENABLED(task_team))) {
523 __kmp_enable_tasking(task_team, thread);
525 KMP_DEBUG_ASSERT(TCR_4(task_team->tt.tt_found_tasks) == TRUE);
526 KMP_DEBUG_ASSERT(TCR_PTR(task_team->tt.tt_threads_data) != NULL);
531 return __kmp_push_priority_task(gtid, thread, taskdata, task_team, pri);
535 thread_data = &task_team->tt.tt_threads_data[tid];
1041 kmp_task_team_t *task_team =
1196 } else if (task_team && (task_team->tt.tt_found_proxy_tasks ||
1197 task_team->tt.tt_hidden_helper_task_encountered)) {
1521 kmp_task_team_t *task_team = thread->th.th_task_team;
1524 if (!KMP_TASKING_ENABLED(task_team)) {
1528 __kmp_enable_tasking(task_team, thread);
1530 kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
1538 task_team->tt.tt_found_proxy_tasks == FALSE)
1539 TCW_4(task_team->tt.tt_found_proxy_tasks, TRUE);
1541 task_team->tt.tt_hidden_helper_task_encountered == FALSE)
1542 TCW_4(task_team->tt.tt_hidden_helper_task_encountered, TRUE);
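
Note that the two flag updates just above store tt_found_proxy_tasks and tt_hidden_helper_task_encountered only when the flag is not already set. A tiny sketch of that test-before-write shape (names are placeholders): an unconditional store would dirty a cache line shared by the whole team on every push, while a read that finds the flag already set stays local.

    #include <atomic>

    std::atomic<int> found_proxy_tasks{0}; // ~ tt.tt_found_proxy_tasks

    void note_proxy_task() {
      // Check first: once set, later pushes only read the flag, so
      // the shared cache line is not invalidated on every proxy task.
      if (found_proxy_tasks.load(std::memory_order_relaxed) == 0)
        found_proxy_tasks.store(1, std::memory_order_relaxed);
    }
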
2392 kmp_task_team_t *task_team = thread->th.th_task_team;
2393 if (task_team != NULL) {
2394 if (KMP_TASKING_ENABLED(task_team)) {
3059 kmp_task_team_t *task_team,
3065 int ntasks = task_team->tt.tt_num_task_pri;
3073 if (__kmp_atomic_compare_store(&task_team->tt.tt_num_task_pri, ntasks,
3076 ntasks = task_team->tt.tt_num_task_pri;
3085 kmp_task_pri_t *list = task_team->tt.tt_task_pri_list;
3107 if (!task_team->tt.tt_untied_task_encountered) {
3111 "from %p: task_team=%p ntasks=%d head=%u tail=%u\n",
3112 gtid, thread_data, task_team, deque_ntasks, target,
3114 task_team->tt.tt_num_task_pri++; // atomic inc, restore value
3134 "%p: task_team=%p ntasks=%d head=%u tail=%u\n",
3135 gtid, thread_data, task_team, deque_ntasks,
3137 task_team->tt.tt_num_task_pri++; // atomic inc, restore value
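
In __kmp_get_priority_task, clustered above, a thread first claims a task by CAS-decrementing tt_num_task_pri and, if the subsequent pop is vetoed (for instance by the task scheduling constraint), increments the counter back; that is what the two "restore value" comments refer to. A hedged standard-C++ sketch of that reserve/undo accounting:

    #include <atomic>

    std::atomic<int> num_pri_tasks{0}; // ~ tt.tt_num_task_pri

    // Claim one pending priority task, or return false if none remain.
    bool try_claim() {
      int n = num_pri_tasks.load(std::memory_order_acquire);
      while (n > 0 &&
             !num_pri_tasks.compare_exchange_weak(
                 n, n - 1, std::memory_order_acq_rel))
        ; // on failure, compare_exchange_weak reloads n; retry
      return n > 0;
    }

    // The pop was vetoed after a successful claim: restore the count.
    void undo_claim() {
      num_pri_tasks.fetch_add(1, std::memory_order_relaxed);
    }
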
3160 kmp_task_team_t *task_team,
3168 KMP_DEBUG_ASSERT(task_team->tt.tt_threads_data !=
3171 thread_data = &task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
3230 // task_team thread_data before calling this routine.
3232 kmp_task_team_t *task_team,
3245 threads_data = task_team->tt.tt_threads_data;
3248 KMP_DEBUG_ASSERT(victim_tid < task_team->tt.tt_max_threads);
3255 "task_team=%p ntasks=%d head=%u tail=%u\n",
3256 gtid, __kmp_gtid_from_thread(victim_thr), task_team,
3262 "task_team=%p ntasks=%d head=%u tail=%u\n",
3263 gtid, __kmp_gtid_from_thread(victim_thr), task_team,
3276 "task_team=%p ntasks=%d head=%u tail=%u\n",
3277 gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
3290 if (!task_team->tt.tt_untied_task_encountered) {
3294 "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n",
3295 gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
3316 "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n",
3317 gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
3343 ("__kmp_steal_task: T#%d inc unfinished_threads to %d: task_team=%p\n",
3344 gtid, count + 1, task_team));
3354 "task_team=%p ntasks=%d head=%u tail=%u\n",
3355 gtid, taskdata, __kmp_gtid_from_thread(victim_thr), task_team,
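
The __kmp_steal_task fragments show the thief locking the victim's deque, taking from the head (the owner works at the tail), and backing out with nothing when the deque is empty or the head task fails the scheduling constraint. A simplified sketch; the real constraint test inspects task ancestry and the untied flag, which is collapsed to a single boolean here:

    #include <deque>
    #include <mutex>

    struct Task {
      bool stealable_under_constraint; // stands in for the real TSC test
    };

    struct ThreadDeque {
      std::mutex lock;
      std::deque<Task *> q; // owner pushes/pops back; thieves pop front
    };

    Task *steal_task(ThreadDeque &victim, bool is_constrained) {
      std::lock_guard<std::mutex> guard(victim.lock);
      if (victim.q.empty())
        return nullptr; // nothing to steal
      Task *t = victim.q.front();
      if (is_constrained && !t->stealable_under_constraint)
        return nullptr; // leave the deque untouched, as the traces note
      victim.q.pop_front();
      return t;
    }
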
3376 kmp_task_team_t *task_team = thread->th.th_task_team;
3388 if (task_team == NULL || current_task == NULL)
3396 threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data);
3400 nthreads = task_team->tt.tt_nproc;
3401 unfinished_threads = &(task_team->tt.tt_unfinished_threads);
3408 if (task_team->tt.tt_num_task_pri) { // get priority task first
3409 task = __kmp_get_priority_task(gtid, task_team, is_constrained);
3412 task = __kmp_remove_my_task(thread, gtid, task_team, is_constrained);
3466 __kmp_steal_task(victim_tid, gtid, task_team, unfinished_threads,
3542 "unfinished_threads to %d task_team=%p\n",
3543 gtid, count, task_team));
3548 // Decrementing task_team->tt.tt_unfinished_threads can allow the primary
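
The __kmp_execute_tasks_template fragments above give the search order for work: priority deques first (when tt_num_task_pri is nonzero), then the thread's own deque, then stealing from a victim. A skeleton of that loop, with every helper declared as a placeholder rather than the actual kmp routine:

    struct Task; // opaque here

    // Placeholder declarations for the three sources of work.
    bool have_priority_tasks();
    Task *get_priority_task(int gtid);
    Task *remove_my_task(int gtid);
    int pick_victim(int gtid);
    Task *steal_task(int victim_tid, int gtid);

    Task *next_task(int gtid) {
      Task *task = nullptr;
      if (have_priority_tasks())            // tt_num_task_pri != 0
        task = get_priority_task(gtid);
      if (task == nullptr)
        task = remove_my_task(gtid);        // own deque, LIFO end
      if (task == nullptr)
        task = steal_task(pick_victim(gtid), gtid); // victim's FIFO end
      return task;
    }
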
3661 static void __kmp_enable_tasking(kmp_task_team_t *task_team,
3669 KMP_DEBUG_ASSERT(task_team != NULL);
3672 nthreads = task_team->tt.tt_nproc;
3677 is_init_thread = __kmp_realloc_task_threads_data(this_thr, task_team);
3687 threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data);
3757 NULL; // Free list for task_team data structures
3816 // Assumes that the new array size is given by task_team->tt.tt_nproc.
3817 // The current size is given by task_team->tt.tt_max_threads.
3819 kmp_task_team_t *task_team) {
3824 if (TCR_4(task_team->tt.tt_found_tasks)) {
3829 threads_data_p = &task_team->tt.tt_threads_data;
3830 nthreads = task_team->tt.tt_nproc;
3831 maxthreads = task_team->tt.tt_max_threads;
3836 __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
3838 if (!TCR_4(task_team->tt.tt_found_tasks)) {
3853 "threads data for task_team %p, new_size = %d, old_size = %d\n",
3854 __kmp_gtid_from_thread(thread), task_team, nthreads, maxthreads));
3877 "threads data for task_team %p, size = %d\n",
3878 __kmp_gtid_from_thread(thread), task_team, nthreads));
3892 task_team->tt.tt_max_threads = nthreads;
3912 TCW_SYNC_4(task_team->tt.tt_found_tasks, TRUE);
3915 __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);
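
__kmp_realloc_task_threads_data runs the grow-the-array protocol visible above: an unlocked fast check of tt_found_tasks, the real work under tt_threads_lock with a second check, growth sized by tt_nproc versus tt_max_threads, and tt_found_tasks published only once the array is usable. A compact sketch with deliberately simplified types:

    #include <atomic>
    #include <mutex>

    struct ThreadData { /* per-thread deque state, elided */ };

    struct TaskTeam {
      std::mutex threads_lock;               // ~ tt.tt_threads_lock
      std::atomic<bool> found_tasks{false};  // ~ tt.tt_found_tasks
      ThreadData *threads_data = nullptr;    // ~ tt.tt_threads_data
      int nproc = 0;                         // desired size
      int max_threads = 0;                   // currently allocated size
    };

    // Returns true only for the thread that did the (re)allocation.
    bool realloc_threads_data(TaskTeam &tt) {
      if (tt.found_tasks.load(std::memory_order_acquire))
        return false; // fast path: already initialized
      std::lock_guard<std::mutex> guard(tt.threads_lock);
      if (tt.found_tasks.load(std::memory_order_relaxed))
        return false; // another thread won the race
      if (tt.nproc > tt.max_threads) {
        ThreadData *grown = new ThreadData[tt.nproc]();
        for (int i = 0; i < tt.max_threads; ++i)
          grown[i] = tt.threads_data[i]; // keep existing entries
        delete[] tt.threads_data;
        tt.threads_data = grown;
        tt.max_threads = tt.nproc;
      }
      // Publish only after the array is fully set up.
      tt.found_tasks.store(true, std::memory_order_release);
      return true;
    }
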
3922 static void __kmp_free_task_threads_data(kmp_task_team_t *task_team) {
3923 __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
3924 if (task_team->tt.tt_threads_data != NULL) {
3926 for (i = 0; i < task_team->tt.tt_max_threads; i++) {
3927 __kmp_free_task_deque(&task_team->tt.tt_threads_data[i]);
3929 __kmp_free(task_team->tt.tt_threads_data);
3930 task_team->tt.tt_threads_data = NULL;
3932 __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);
3938 static void __kmp_free_task_pri_list(kmp_task_team_t *task_team) {
3939 __kmp_acquire_bootstrap_lock(&task_team->tt.tt_task_pri_lock);
3940 if (task_team->tt.tt_task_pri_list != NULL) {
3941 kmp_task_pri_t *list = task_team->tt.tt_task_pri_list;
3948 task_team->tt.tt_task_pri_list = NULL;
3950 __kmp_release_bootstrap_lock(&task_team->tt.tt_task_pri_lock);
3953 static inline void __kmp_task_team_init(kmp_task_team_t *task_team,
3957 if (!task_team->tt.tt_active || team_nth != task_team->tt.tt_nproc) {
3958 TCW_4(task_team->tt.tt_found_tasks, FALSE);
3959 TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3960 TCW_4(task_team->tt.tt_hidden_helper_task_encountered, FALSE);
3961 TCW_4(task_team->tt.tt_nproc, team_nth);
3962 KMP_ATOMIC_ST_REL(&task_team->tt.tt_unfinished_threads, team_nth);
3963 TCW_4(task_team->tt.tt_active, TRUE);
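
__kmp_task_team_init, as the fragments show, reinitializes a (possibly recycled) struct only when it is inactive or the team size changed, clearing the per-region flags and seeding tt_unfinished_threads with the team size. A minimal sketch of that conditional reset, with names simplified:

    #include <atomic>

    struct TTInit {
      std::atomic<int> unfinished_threads{0};
      bool active = false;
      bool found_tasks = false;
      int nproc = 0;
    };

    void task_team_init(TTInit &tt, int team_nth) {
      if (!tt.active || team_nth != tt.nproc) {
        tt.found_tasks = false;
        tt.nproc = team_nth;
        // Every team member must check in before the team is "done".
        tt.unfinished_threads.store(team_nth, std::memory_order_release);
        tt.active = true;
      }
    }
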
3973 kmp_task_team_t *task_team = NULL;
3982 task_team = __kmp_free_task_teams;
3983 TCW_PTR(__kmp_free_task_teams, task_team->tt.tt_next);
3984 task_team->tt.tt_next = NULL;
3989 if (task_team == NULL) {
3995 task_team = (kmp_task_team_t *)__kmp_allocate(sizeof(kmp_task_team_t));
3996 __kmp_init_bootstrap_lock(&task_team->tt.tt_threads_lock);
3997 __kmp_init_bootstrap_lock(&task_team->tt.tt_task_pri_lock);
4003 &task_team->tt.tt_found_tasks, sizeof(task_team->tt.tt_found_tasks));
4006 CCAST(kmp_uint32 *, &task_team->tt.tt_active),
4007 sizeof(task_team->tt.tt_active));
4010 // task_team->tt.tt_threads_data = NULL;
4011 // task_team->tt.tt_max_threads = 0;
4012 // task_team->tt.tt_next = NULL;
4015 __kmp_task_team_init(task_team, team);
4017 KA_TRACE(20, ("__kmp_allocate_task_team: T#%d exiting; task_team = %p "
4019 (thread ? __kmp_gtid_from_thread(thread) : -1), task_team,
4020 KMP_ATOMIC_LD_RLX(&task_team->tt.tt_unfinished_threads)));
4021 return task_team;
4027 void __kmp_free_task_team(kmp_info_t *thread, kmp_task_team_t *task_team) {
4028 KA_TRACE(20, ("__kmp_free_task_team: T#%d task_team = %p\n",
4029 thread ? __kmp_gtid_from_thread(thread) : -1, task_team));
4034 KMP_DEBUG_ASSERT(task_team->tt.tt_next == NULL);
4035 task_team->tt.tt_next = __kmp_free_task_teams;
4036 TCW_PTR(__kmp_free_task_teams, task_team);
4047 kmp_task_team_t *task_team;
4052 while ((task_team = __kmp_free_task_teams) != NULL) {
4053 __kmp_free_task_teams = task_team->tt.tt_next;
4054 task_team->tt.tt_next = NULL;
4057 if (task_team->tt.tt_threads_data != NULL) {
4058 __kmp_free_task_threads_data(task_team);
4060 if (task_team->tt.tt_task_pri_list != NULL) {
4061 __kmp_free_task_pri_list(task_team);
4063 __kmp_free(task_team);
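
Taken together, __kmp_allocate_task_team, __kmp_free_task_team, and __kmp_reap_task_teams above implement a free list: freed task teams are pushed onto a global singly linked list through tt_next, allocation pops from it before falling back to the heap (so the bootstrap locks are initialized only once, on first allocation), and the reap routine drains the list at shutdown. A sketch of that recycling; the global lock that guards the list in the real runtime is elided here:

    // Recycled task-team records, linked through their `next` field.
    struct TaskTeamRec {
      TaskTeamRec *next = nullptr;
      /* locks and tt fields elided; set up once at first allocation */
    };

    static TaskTeamRec *free_list = nullptr; // ~ __kmp_free_task_teams

    TaskTeamRec *allocate_task_team() {
      if (TaskTeamRec *tt = free_list) { // reuse: locks already valid
        free_list = tt->next;
        tt->next = nullptr;
        return tt;
      }
      return new TaskTeamRec(); // first use: one-time setup happens here
    }

    void free_task_team(TaskTeamRec *tt) {
      tt->next = free_list; // push back for later reuse
      free_list = tt;
    }

    void reap_task_teams() { // library shutdown: actually release memory
      while (TaskTeamRec *tt = free_list) {
        free_list = tt->next;
        delete tt;
      }
    }
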
4070 // 1) a single task_team pointer
4079 node->task_team = current->task_team;
4081 thread->th.th_task_team = current->task_team = NULL;
4090 if (current->task_team) {
4091 __kmp_free_task_team(thread, current->task_team);
4095 current->task_team = next->task_team;
4099 thread->th.th_task_team = current->task_team;
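
The push/pop pair sketched by these fragments saves the active task_team when a nested serialized region begins (clearing th_task_team for the inner region) and, on pop, frees whatever inner task_team was created before restoring the saved pointer. A loose sketch of that save/free/restore discipline, with the list simplified to a plain stack (that simplification is an assumption, and both names below are placeholders):

    struct TaskTeamOpaque;                  // stands in for kmp_task_team_t
    void free_task_team(TaskTeamOpaque *);  // placeholder declaration

    struct SavedTT {
      TaskTeamOpaque *saved;
      SavedTT *next;
    };

    // Enter a nested serialized region: stash the outer task team.
    void push_task_team(SavedTT *&stack, TaskTeamOpaque *&th_task_team) {
      stack = new SavedTT{th_task_team, stack};
      th_task_team = nullptr; // inner region starts with no task team
    }

    // Leave the region: drop the inner task team, restore the outer.
    void pop_task_team(SavedTT *&stack, TaskTeamOpaque *&th_task_team) {
      if (th_task_team != nullptr)
        free_task_team(th_task_team);
      SavedTT *top = stack;
      th_task_team = top->saved;
      stack = top->next;
      delete top;
    }
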
4127 KA_TRACE(10, ("__kmp_wait_to_unref_task_team: T#%d task_team == NULL\n",
4142 "unreference task_team\n",
4167 // __kmp_task_team_setup: Create a task_team for the current team, but use
4181 20, ("__kmp_task_team_setup: Primary T#%d created new task_team %p"
4190 // If this task_team hasn't been created yet, allocate it. It will be used in
4197 KA_TRACE(20, ("__kmp_task_team_setup: Primary T#%d created new task_team %p"
4205 // other task_team; make sure it is allocated and properly initialized. As
4207 // previous task_team struct (above), until they receive the signal to stop
4215 "task_team %p for team %d at parity=%d\n",
4220 kmp_task_team_t *task_team = team->t.t_task_team[other_team];
4221 __kmp_task_team_init(task_team, team);
4224 KA_TRACE(20, ("__kmp_task_team_setup: Primary T#%d reset next task_team "
4236 kmp_task_team_t *task_team = team->t.t_task_team[i];
4237 if (KMP_TASKING_ENABLED(task_team)) {
4240 __kmp_enable_tasking(task_team, this_thr);
4241 for (int j = 0; j < task_team->tt.tt_nproc; ++j) {
4242 kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[j];
4259 // Toggle the th_task_state field, to switch which task_team this thread
4268 ("__kmp_task_team_sync: Thread T#%d task team switched to task_team "
4283 kmp_task_team_t *task_team = team->t.t_task_team[this_thr->th.th_task_state];
4286 KMP_DEBUG_ASSERT(task_team == this_thr->th.th_task_team);
4288 if ((task_team != NULL) && KMP_TASKING_ENABLED(task_team)) {
4291 "(for unfinished_threads to reach 0) on task_team = %p\n",
4292 __kmp_gtid_from_thread(this_thr), task_team));
4298 &task_team->tt.tt_unfinished_threads),
4306 ("__kmp_task_team_wait: Primary T#%d deactivating task_team %p: "
4308 __kmp_gtid_from_thread(this_thr), task_team));
4309 TCW_SYNC_4(task_team->tt.tt_found_proxy_tasks, FALSE);
4310 TCW_SYNC_4(task_team->tt.tt_hidden_helper_task_encountered, FALSE);
4311 KMP_CHECK_UPDATE(task_team->tt.tt_untied_task_encountered, 0);
4312 TCW_SYNC_4(task_team->tt.tt_active, FALSE);
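
__kmp_task_team_wait's final phase, per the fragments above, has the primary wait for tt_unfinished_threads to reach zero and then deactivate the struct, clearing the per-region flags so it can be recycled. A sketch of that shape; the bare spin below stands in for the runtime's flag wait, which can yield or sleep:

    #include <atomic>

    struct TaskTeamWait {
      std::atomic<int> unfinished_threads{0}; // ~ tt_unfinished_threads
      std::atomic<bool> active{true};         // ~ tt_active
      bool found_proxy_tasks = false;
      bool hidden_helper_encountered = false;
    };

    void task_team_wait(TaskTeamWait &tt) {
      // Real runtime: flag wait with optional sleeping, not a hard spin.
      while (tt.unfinished_threads.load(std::memory_order_acquire) != 0) {
      }
      // Deactivate and clear per-region flags for reuse.
      tt.found_proxy_tasks = false;
      tt.hidden_helper_encountered = false;
      tt.active.store(false, std::memory_order_release);
    }
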
4362 kmp_task_team_t *task_team = taskdata->td_task_team;
4367 // If task_team is NULL something went really wrong...
4368 KMP_DEBUG_ASSERT(task_team != NULL);
4371 kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];