Lines matching defs:taskdata (occurrences of taskdata in LLVM's openmp/runtime/src/kmp_tasking.cpp; each entry is prefixed with its source line number)
403 kmp_taskdata_t *taskdata,
409 gtid, taskdata, pri));
444 __kmp_task_is_allowed(gtid, __kmp_task_stealing_constraint, taskdata,
449 gtid, taskdata));
458 // Push taskdata.
459 thread_data->td.td_deque[thread_data->td.td_deque_tail] = taskdata;
466 KMP_FSYNC_RELEASING(taskdata); // releasing child
469 gtid, taskdata, thread_data->td.td_deque_ntasks,
479 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
484 if (UNLIKELY(taskdata->td_flags.hidden_helper &&
498 ("__kmp_push_task: T#%d trying to push task %p.\n", gtid, taskdata));
500 if (UNLIKELY(taskdata->td_flags.tiedness == TASK_UNTIED)) {
503 kmp_int32 counter = 1 + KMP_ATOMIC_INC(&taskdata->td_untied_count);
508 gtid, counter, taskdata));
512 if (UNLIKELY(taskdata->td_flags.task_serial)) {
515 gtid, taskdata));
528 if (taskdata->td_flags.priority_specified && task->data2.priority > 0 &&
531 return __kmp_push_priority_task(gtid, thread, taskdata, task_team, pri);
549 __kmp_task_is_allowed(gtid, __kmp_task_stealing_constraint, taskdata,
553 gtid, taskdata));
572 __kmp_task_is_allowed(gtid, __kmp_task_stealing_constraint, taskdata,
577 gtid, taskdata));
590 taskdata; // Push taskdata
597 KMP_FSYNC_RELEASING(taskdata); // releasing child
600 gtid, taskdata, thread_data->td.td_deque_ntasks,
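
The entries at 458-469 and 590-600 are the push paths (__kmp_push_priority_task and __kmp_push_task): the task is stored at the deque tail, the tail wraps around a power-of-two capacity, and the task count goes up. A minimal sketch of that ring-buffer push, with simplified, illustrative names (the real deque is per-thread and guarded by td_deque_lock):

    #include <mutex>
    #include <vector>

    struct TaskDeque {
      std::vector<void *> slots;    // like td_deque, power-of-two capacity
      unsigned head = 0, tail = 0;  // td_deque_head / td_deque_tail
      unsigned ntasks = 0;          // td_deque_ntasks
      std::mutex lock;              // stands in for td_deque_lock

      explicit TaskDeque(unsigned capacity) : slots(capacity) {}

      // Returns false when the deque is full; the runtime then grows the
      // deque or falls back to executing the task immediately.
      bool push(void *taskdata) {
        std::lock_guard<std::mutex> guard(lock);
        if (ntasks >= slots.size())
          return false;
        slots[tail] = taskdata;                            // "Push taskdata."
        tail = (tail + 1) & (unsigned)(slots.size() - 1);  // wrap: 2^n capacity
        ++ntasks;
        return true;
      }
    };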
672 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
677 gtid, taskdata, current_task));
679 KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
688 if (taskdata->td_flags.tiedness == TASK_TIED) {
689 __kmp_push_task_stack(gtid, thread, taskdata);
694 thread->th.th_current_task = taskdata;
696 KMP_DEBUG_ASSERT(taskdata->td_flags.started == 0 ||
697 taskdata->td_flags.tiedness == TASK_UNTIED);
698 KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0 ||
699 taskdata->td_flags.tiedness == TASK_UNTIED);
700 taskdata->td_flags.started = 1;
701 taskdata->td_flags.executing = 1;
702 KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
703 KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
707 // need to store current thread state (in a thread or taskdata structure)
710 KA_TRACE(10, ("__kmp_task_start(exit): T#%d task=%p\n", gtid, taskdata));
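
Lines 694-701 show the state transition in __kmp_task_start: the thread adopts the task as its current task and flips the started/executing bits, and the debug asserts permit a restart only for untied tasks. As a tiny sketch (bitfield names mirror kmp_tasking_flags_t; asserts shown as comments):

    // illustrative subset of kmp_tasking_flags_t
    struct TaskFlags { unsigned started : 1, executing : 1, complete : 1; };

    void task_start(TaskFlags &f) {
      // debug invariant: started == 0 unless the task is untied (may resume)
      f.started = 1;
      f.executing = 1;
      // debug invariant: complete == 0 and freed == 0
    }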
739 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
749 &(taskdata->ompt_task_info.task_data));
751 taskdata->ompt_task_info.scheduling_parent = current_task;
760 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
761 if (__kmp_omp_cancellation && taskdata->td_taskgroup &&
762 taskdata->td_taskgroup->cancel_request == cancel_taskgroup) {
768 &(taskdata->ompt_task_info.task_data), status,
779 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
784 gtid, loc_ref, taskdata, current_task));
786 if (UNLIKELY(taskdata->td_flags.tiedness == TASK_UNTIED)) {
789 kmp_int32 counter = 1 + KMP_ATOMIC_INC(&taskdata->td_untied_count);
793 gtid, counter, taskdata));
796 taskdata->td_flags.task_serial =
804 taskdata->ompt_task_info.frame.exit_frame.ptr = frame_address;
806 taskdata->ompt_task_info.frame.exit_frame_flags =
813 &(taskdata->ompt_task_info.task_data),
814 TASK_TYPE_DETAILS_FORMAT(taskdata), 0, return_address);
821 loc_ref, taskdata));
885 // taskdata: task to free
887 static void __kmp_free_task(kmp_int32 gtid, kmp_taskdata_t *taskdata,
890 taskdata));
893 KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
894 KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0);
895 KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 1);
896 KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
897 KMP_DEBUG_ASSERT(taskdata->td_allocated_child_tasks == 0 ||
898 taskdata->td_flags.task_serial == 1);
899 KMP_DEBUG_ASSERT(taskdata->td_incomplete_child_tasks == 0);
900 kmp_task_t *task = KMP_TASKDATA_TO_TASK(taskdata);
905 taskdata->td_flags.freed = 1;
908 if (!taskdata->is_taskgraph) {
910 // deallocate the taskdata and shared variable blocks associated with this task
912 __kmp_fast_free(thread, taskdata);
914 __kmp_thread_free(thread, taskdata);
918 taskdata->td_flags.complete = 0;
919 taskdata->td_flags.started = 0;
920 taskdata->td_flags.freed = 0;
921 taskdata->td_flags.executing = 0;
922 taskdata->td_flags.task_serial =
923 (taskdata->td_parent->td_flags.final ||
924 taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser);
926 // taskdata->td_allow_completion_event.pending_events_count = 1;
927 KMP_ATOMIC_ST_RLX(&taskdata->td_untied_count, 0);
928 KMP_ATOMIC_ST_RLX(&taskdata->td_incomplete_child_tasks, 0);
930 KMP_ATOMIC_ST_RLX(&taskdata->td_allocated_child_tasks, 1);
934 KA_TRACE(20, ("__kmp_free_task: T#%d freed task %p\n", gtid, taskdata));
941 // taskdata: task to free
944 kmp_taskdata_t *taskdata,
949 (taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser) &&
950 !taskdata->td_flags.proxy;
951 KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
953 kmp_int32 children = KMP_ATOMIC_DEC(&taskdata->td_allocated_child_tasks) - 1;
958 kmp_taskdata_t *parent_taskdata = taskdata->td_parent;
962 gtid, taskdata));
965 __kmp_free_task(gtid, taskdata, thread);
967 taskdata = parent_taskdata;
973 if (taskdata->td_flags.tasktype == TASK_IMPLICIT) {
974 if (taskdata->td_dephash) { // do we need to cleanup dephash?
975 int children = KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks);
976 kmp_tasking_flags_t flags_old = taskdata->td_flags;
981 RCAST(kmp_int32 *, &taskdata->td_flags),
986 gtid, taskdata));
988 __kmp_dephash_free_entries(thread, taskdata->td_dephash);
995 children = KMP_ATOMIC_DEC(&taskdata->td_allocated_child_tasks) - 1;
1002 gtid, taskdata, children));
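
The cluster at 941-1002 (__kmp_free_task_and_ancestors) implements reference counting over the task tree: td_allocated_child_tasks starts at 1 for the task's self-reference, each child holds a reference to its parent, and a task whose count reaches zero is freed, which may in turn release its parent. A minimal sketch of that walk, assuming illustrative names:

    #include <atomic>

    struct Task {
      Task *parent = nullptr;
      // starts at 1: self-reference, like td_allocated_child_tasks
      std::atomic<int> allocated_children{1};
    };

    // Drop one reference; whenever a task's count hits zero, free it and
    // continue with its parent, mirroring __kmp_free_task_and_ancestors.
    void release_and_walk_ancestors(Task *t) {
      while (t != nullptr) {
        int remaining = t->allocated_children.fetch_sub(1) - 1;
        if (remaining != 0)
          return;           // still referenced by children or the runtime
        Task *parent = t->parent;
        delete t;           // stands in for __kmp_free_task
        t = parent;         // the freed child held a reference to its parent
      }
    }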
1013 static bool __kmp_track_children_task(kmp_taskdata_t *taskdata) {
1014 kmp_tasking_flags_t flags = taskdata->td_flags;
1019 KMP_ATOMIC_LD_ACQ(&taskdata->td_parent->td_incomplete_child_tasks) > 0;
1021 if (taskdata->td_taskgroup && taskdata->is_taskgraph)
1022 ret = ret || KMP_ATOMIC_LD_ACQ(&taskdata->td_taskgroup->count) > 0;
1039 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
1044 // to avoid seg fault when we need to access taskdata->td_flags after free when using vanilla taskloop
1052 gtid, taskdata, resumed_task));
1054 KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
1057 is_taskgraph = taskdata->is_taskgraph;
1062 if (taskdata->td_flags.tiedness == TASK_TIED) {
1063 __kmp_pop_task_stack(gtid, thread, taskdata);
1067 if (UNLIKELY(taskdata->td_flags.tiedness == TASK_UNTIED)) {
1070 kmp_int32 counter = KMP_ATOMIC_DEC(&taskdata->td_untied_count) - 1;
1074 gtid, counter, taskdata));
1079 KMP_DEBUG_ASSERT(taskdata->td_flags.task_serial);
1080 resumed_task = taskdata->td_parent; // In a serialized task, the resumed
1087 gtid, taskdata, resumed_task));
1095 (taskdata->td_flags.tasking_ser || taskdata->td_flags.task_serial) ==
1096 taskdata->td_flags.task_serial);
1097 if (taskdata->td_flags.task_serial) {
1099 resumed_task = taskdata->td_parent; // In a serialized task, the resumed
1113 if (UNLIKELY(taskdata->td_flags.destructors_thunk)) {
1119 KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
1120 KMP_DEBUG_ASSERT(taskdata->td_flags.started == 1);
1121 KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
1124 if (UNLIKELY(taskdata->td_flags.detachable == TASK_DETACHABLE)) {
1125 if (taskdata->td_allow_completion_event.type ==
1128 __kmp_acquire_tas_lock(&taskdata->td_allow_completion_event.lock, gtid);
1129 if (taskdata->td_allow_completion_event.type ==
1132 KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 1);
1133 taskdata->td_flags.executing = 0; // suspend the finishing task
1143 // no access to taskdata after this point!
1144 // __kmp_fulfill_event might free taskdata at any time from now
1146 taskdata->td_flags.proxy = TASK_PROXY; // proxify!
1149 __kmp_release_tas_lock(&taskdata->td_allow_completion_event.lock, gtid);
1154 if (taskdata->td_target_data.async_handle != NULL) {
1170 taskdata->td_flags.complete = 1; // mark the task as completed
1172 taskdata->td_flags.onced = 1; // mark the task as ran once already
1182 if (__kmp_track_children_task(taskdata)) {
1183 __kmp_release_deps(gtid, taskdata);
1188 KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks);
1191 if (taskdata->td_taskgroup && !taskdata->is_taskgraph)
1193 if (taskdata->td_taskgroup)
1195 KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
1200 __kmp_release_deps(gtid, taskdata);
1206 KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 1);
1207 taskdata->td_flags.executing = 0; // suspend the finishing task
1210 if (taskdata->td_flags.hidden_helper) {
1219 gtid, taskdata, children));
1227 __kmp_free_task_and_ancestors(gtid, taskdata, thread);
1234 if (is_taskgraph && __kmp_track_children_task(taskdata) &&
1235 taskdata->td_taskgroup) {
1239 // taskdata->started, etc. If we release the barrier earlier, these
1242 KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
1248 gtid, taskdata, resumed_task));
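
The __kmp_task_finish cluster (1039-1248) does the completion bookkeeping: mark the task complete, decrement the parent's td_incomplete_child_tasks (which can unblock a taskwait), and decrement the enclosing taskgroup's count (which can unblock taskgroup-end). Reduced to its core, with hypothetical names:

    #include <atomic>

    struct Taskgroup { std::atomic<int> count{0}; };

    struct TaskData {
      TaskData *parent = nullptr;
      Taskgroup *taskgroup = nullptr;           // td_taskgroup
      std::atomic<int> incomplete_children{0};  // td_incomplete_child_tasks
      bool complete = false;
    };

    void task_finish(TaskData *td) {
      td->complete = true;                      // td_flags.complete = 1
      if (td->parent != nullptr)
        td->parent->incomplete_children.fetch_sub(1); // may unblock taskwait
      if (td->taskgroup != nullptr)
        td->taskgroup->count.fetch_sub(1);            // may unblock taskgroup-end
    }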
1445 // __kmp_task_alloc: Allocate the taskdata and task data structures for a task
1462 kmp_taskdata_t *taskdata;
1556 // Avoid double allocation here by combining shareds with taskdata
1558 taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, shareds_offset +
1561 taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, shareds_offset +
1565 task = KMP_TASKDATA_TO_TASK(taskdata);
1567 // Make sure task & taskdata are aligned appropriately
1569 KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(double) - 1)) == 0);
1572 KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(_Quad) - 1)) == 0);
1576 // Avoid double allocation here by combining shareds with taskdata
1577 task->shareds = &((char *)taskdata)[shareds_offset];
1587 taskdata->td_task_id = KMP_GEN_TASK_ID();
1588 taskdata->td_team = thread->th.th_team;
1589 taskdata->td_alloc_thread = thread;
1590 taskdata->td_parent = parent_task;
1591 taskdata->td_level = parent_task->td_level + 1; // increment nesting level
1592 KMP_ATOMIC_ST_RLX(&taskdata->td_untied_count, 0);
1593 taskdata->td_ident = loc_ref;
1594 taskdata->td_taskwait_ident = NULL;
1595 taskdata->td_taskwait_counter = 0;
1596 taskdata->td_taskwait_thread = 0;
1597 KMP_DEBUG_ASSERT(taskdata->td_parent != NULL);
1600 copy_icvs(&taskdata->td_icvs, &taskdata->td_parent->td_icvs);
1602 taskdata->td_flags = *flags;
1603 taskdata->td_task_team = thread->th.th_task_team;
1604 taskdata->td_size_alloc = shareds_offset + sizeof_shareds;
1605 taskdata->td_flags.tasktype = TASK_EXPLICIT;
1610 taskdata->td_team = shadow_thread->th.th_team;
1611 taskdata->td_task_team = shadow_thread->th.th_task_team;
1615 taskdata->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);
1618 taskdata->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;
1624 taskdata->td_flags.task_serial =
1625 (parent_task->td_flags.final || taskdata->td_flags.team_serial ||
1626 taskdata->td_flags.tasking_ser || flags->merged_if0);
1628 taskdata->td_flags.started = 0;
1629 taskdata->td_flags.executing = 0;
1630 taskdata->td_flags.complete = 0;
1631 taskdata->td_flags.freed = 0;
1633 taskdata->td_flags.onced = 0;
1635 KMP_ATOMIC_ST_RLX(&taskdata->td_incomplete_child_tasks, 0);
1637 KMP_ATOMIC_ST_RLX(&taskdata->td_allocated_child_tasks, 1);
1638 taskdata->td_taskgroup =
1640 taskdata->td_dephash = NULL;
1641 taskdata->td_depnode = NULL;
1642 taskdata->td_target_data.async_handle = NULL;
1644 taskdata->td_last_tied = NULL; // will be set when the task is scheduled
1646 taskdata->td_last_tied = taskdata;
1647 taskdata->td_allow_completion_event.type = KMP_EVENT_UNINITIALIZED;
1650 __ompt_task_init(taskdata, gtid);
1654 if (__kmp_track_children_task(taskdata)) {
1660 if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT) {
1661 KMP_ATOMIC_INC(&taskdata->td_parent->td_allocated_child_tasks);
1664 taskdata->td_flags.task_serial = FALSE;
1674 taskdata->is_taskgraph = 1;
1675 taskdata->tdg = __kmp_global_tdgs[__kmp_curr_tdg_idx];
1676 taskdata->td_task_id = KMP_ATOMIC_INC(&__kmp_tdg_task_id);
1680 gtid, taskdata, taskdata->td_parent));
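
Lines 1556-1577 show why __kmp_task_alloc makes a single allocation: the kmp_taskdata_t header, the user-visible kmp_task_t, and the shareds area all live in one block, with task->shareds pointing at shareds_offset and the asserts at 1569-1572 checking alignment. A sketch of that layout, with *_sketch types standing in for the real structs (the real code rounds the offset with a runtime helper):

    #include <cstddef>
    #include <cstdlib>

    struct kmp_task_sketch { void *shareds; /* routine, part_id, ... */ };
    struct kmp_taskdata_sketch { /* flags, counters, td_parent, ... */ long pad; };

    kmp_task_sketch *task_alloc(std::size_t sizeof_task, std::size_t sizeof_shareds) {
      // shareds live after header + task struct, rounded up to double alignment
      std::size_t shareds_offset = sizeof(kmp_taskdata_sketch) + sizeof_task;
      shareds_offset = (shareds_offset + sizeof(double) - 1) & ~(sizeof(double) - 1);
      char *block = static_cast<char *>(std::malloc(shareds_offset + sizeof_shareds));
      auto *taskdata = reinterpret_cast<kmp_taskdata_sketch *>(block);
      auto *task = reinterpret_cast<kmp_task_sketch *>(taskdata + 1); // KMP_TASKDATA_TO_TASK
      task->shareds = block + shareds_offset;   // one allocation instead of two
      return task;
    }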
1758 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
1763 gtid, taskdata, current_task));
1765 if (UNLIKELY(taskdata->td_flags.proxy == TASK_PROXY &&
1766 taskdata->td_flags.complete == 1)) {
1772 gtid, taskdata));
1778 gtid, taskdata, current_task));
1795 taskdata->ompt_task_info.frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
1800 if (taskdata->td_flags.proxy != TASK_PROXY) {
1810 kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
1834 if (taskdata->td_flags.tiedness == TASK_UNTIED) {
1835 taskdata->td_last_tied = current_task->td_last_tied;
1836 KMP_DEBUG_ASSERT(taskdata->td_last_tied);
1869 taskdata->ompt_task_info.dispatch_chunk.iterations > 0)) {
1871 instance.ptr = &(taskdata->ompt_task_info.dispatch_chunk);
1874 &(team_info->parallel_data), &(taskdata->ompt_task_info.task_data),
1876 taskdata->ompt_task_info.dispatch_chunk = {0, 0};
1888 __kmp_forkjoin_frames_mode == 3 && !taskdata->td_flags.task_serial &&
1898 KMP_FSYNC_ACQUIRED(taskdata); // acquired self (new task)
1902 if (taskdata->td_target_data.async_handle != NULL) {
1907 tgt_target_nowait_query(&taskdata->td_target_data.async_handle);
1912 if (taskdata->td_flags.native) {
1927 KMP_FSYNC_CANCEL(taskdata); // destroy self (just executed)
1928 KMP_FSYNC_RELEASING(taskdata->td_parent); // releasing parent
1938 if (taskdata->td_flags.proxy != TASK_PROXY) {
1942 if (taskdata->td_flags.tiedness == TASK_TIED) {
1943 taskdata->ompt_task_info.frame.exit_frame = ompt_data_none;
1951 else if (UNLIKELY(ompt_enabled.enabled && taskdata->td_flags.target)) {
1959 gtid, taskdata, current_task));
2229 kmp_taskdata_t *taskdata = nullptr;
2239 taskdata = thread->th.th_current_task;
2246 my_task_data = &(taskdata->ompt_task_info.task_data);
2249 taskdata->ompt_task_info.frame.enter_frame.ptr = frame_address;
2270 taskdata->td_taskwait_counter += 1;
2271 taskdata->td_taskwait_ident = loc_ref;
2272 taskdata->td_taskwait_thread = gtid + 1;
2282 !taskdata->td_flags.team_serial && !taskdata->td_flags.final;
2295 &(taskdata->td_incomplete_child_tasks)),
2297 while (KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks) != 0) {
2305 KMP_FSYNC_ACQUIRED(taskdata); // acquire self - sync with children
2310 taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread;
2324 taskdata->ompt_task_info.frame.enter_frame = ompt_data_none;
2331 gtid, taskdata));
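
The taskwait fragments at 2270-2305 show the waiting pattern: record the wait site in the taskdata, then, rather than blocking, keep executing queued tasks until td_incomplete_child_tasks reaches zero. A minimal sketch, assuming the caller supplies an execute-one-task callback (the real loop uses the runtime scheduler and flag objects):

    #include <atomic>
    #include <functional>
    #include <thread>

    void taskwait(std::atomic<int> &incomplete_children,
                  const std::function<bool()> &execute_one_task) {
      while (incomplete_children.load(std::memory_order_acquire) != 0) {
        if (!execute_one_task())
          std::this_thread::yield(); // nothing runnable: back off and re-check
      }
    }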
2361 kmp_taskdata_t *taskdata = NULL;
2374 taskdata = thread->th.th_current_task;
2381 taskdata->td_taskwait_counter += 1;
2382 taskdata->td_taskwait_ident = loc_ref;
2383 taskdata->td_taskwait_thread = gtid + 1;
2391 if (!taskdata->td_flags.team_serial) {
2416 taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread;
2421 gtid, taskdata));
2884 kmp_taskdata_t *taskdata = thread->th.th_current_task;
2890 tg_new->parent = taskdata->td_taskgroup;
2894 taskdata->td_taskgroup = tg_new;
2902 ompt_data_t my_task_data = taskdata->ompt_task_info.task_data;
2918 kmp_taskdata_t *taskdata = thread->th.th_current_task;
2919 kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
2929 my_task_data = taskdata->ompt_task_info.task_data;
2944 taskdata->td_taskwait_counter += 1;
2945 taskdata->td_taskwait_ident = loc;
2946 taskdata->td_taskwait_thread = gtid + 1;
2964 if (!taskdata->td_flags.team_serial ||
2976 taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread; // end waiting
2988 KMP_FSYNC_ACQUIRED(taskdata); // acquire self - sync with descendants
3043 taskdata->td_taskgroup = taskgroup->parent;
3047 gtid, taskdata));
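
Lines 2884-3047 show taskgroup nesting as a linked stack hanging off the current task: begin allocates a new group whose parent is the current td_taskgroup; end waits for the group's count to drain, then pops back to the parent. Sketched with illustrative names:

    #include <atomic>
    #include <thread>

    struct kmp_taskgroup_sketch {
      std::atomic<int> count{0};               // live tasks in this group
      kmp_taskgroup_sketch *parent = nullptr;
    };

    struct CurrentTask { kmp_taskgroup_sketch *td_taskgroup = nullptr; };

    void taskgroup_begin(CurrentTask *taskdata) {
      auto *tg_new = new kmp_taskgroup_sketch;
      tg_new->parent = taskdata->td_taskgroup;  // remember the enclosing group
      taskdata->td_taskgroup = tg_new;
    }

    void taskgroup_end(CurrentTask *taskdata) {
      kmp_taskgroup_sketch *tg = taskdata->td_taskgroup;
      while (tg->count.load(std::memory_order_acquire) != 0)
        std::this_thread::yield();              // real code executes/steals tasks here
      taskdata->td_taskgroup = tg->parent;      // pop back to the enclosing group
      delete tg;
    }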
3062 kmp_taskdata_t *taskdata;
3101 taskdata = thread_data->td.td_deque[target];
3102 if (__kmp_task_is_allowed(gtid, is_constrained, taskdata, current)) {
3119 taskdata = NULL;
3122 taskdata = thread_data->td.td_deque[target];
3123 if (__kmp_task_is_allowed(gtid, is_constrained, taskdata, current)) {
3126 taskdata = NULL;
3129 if (taskdata == NULL) {
3154 task = KMP_TASKDATA_TO_TASK(taskdata);
3163 kmp_taskdata_t *taskdata;
3200 taskdata = thread_data->td.td_deque[tail];
3202 if (!__kmp_task_is_allowed(gtid, is_constrained, taskdata,
3221 gtid, taskdata, thread_data->td.td_deque_ntasks,
3224 task = KMP_TASKDATA_TO_TASK(taskdata);
3237 kmp_taskdata_t *taskdata;
3284 taskdata = victim_td->td.td_deque[victim_td->td.td_deque_head];
3285 if (__kmp_task_is_allowed(gtid, is_constrained, taskdata, current)) {
3302 taskdata = NULL;
3305 taskdata = victim_td->td.td_deque[target];
3306 if (__kmp_task_is_allowed(gtid, is_constrained, taskdata, current)) {
3309 taskdata = NULL;
3312 if (taskdata == NULL) {
3355 gtid, taskdata, __kmp_gtid_from_thread(victim_thr), task_team,
3358 task = KMP_TASKDATA_TO_TASK(taskdata);
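
The dequeue clusters at 3062-3358 encode the classic work-stealing asymmetry: a thread pops its own deque at the tail (newest task, good locality), while a thief takes the victim's head (oldest task), so owner and thieves rarely contend on the same slot. A simplified single-lock sketch (the real code also re-checks __kmp_task_is_allowed before committing):

    #include <mutex>
    #include <vector>

    struct Deque {
      std::vector<void *> slots;          // power-of-two capacity
      unsigned head = 0, tail = 0, ntasks = 0;
      std::mutex lock;
      explicit Deque(unsigned cap) : slots(cap) {}

      void *pop_own_tail() {              // owner path: __kmp_remove_my_task
        std::lock_guard<std::mutex> g(lock);
        if (ntasks == 0) return nullptr;
        tail = (tail - 1) & (unsigned)(slots.size() - 1); // newest task, LIFO
        --ntasks;
        return slots[tail];
      }

      void *steal_head() {                // thief path: __kmp_steal_task
        std::lock_guard<std::mutex> g(lock);
        if (ntasks == 0) return nullptr;
        void *t = slots[head];            // oldest task, FIFO
        head = (head + 1) & (unsigned)(slots.size() - 1);
        --ntasks;
        return t;
      }
    };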
4361 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
4362 kmp_task_team_t *task_team = taskdata->td_task_team;
4365 taskdata, tid));
4378 tid, taskdata));
4387 taskdata, tid));
4409 taskdata, tid));
4422 thread_data->td.td_deque[thread_data->td.td_deque_tail] = taskdata;
4431 taskdata, tid));
4456 static void __kmp_first_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
4457 KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
4458 KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
4459 KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
4460 KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
4462 taskdata->td_flags.complete = 1; // mark the task as completed
4464 taskdata->td_flags.onced = 1;
4467 if (taskdata->td_taskgroup)
4468 KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
4472 KMP_ATOMIC_OR(&taskdata->td_incomplete_child_tasks, PROXY_TASK_FLAG);
4475 static void __kmp_second_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
4481 KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks);
4485 KMP_ATOMIC_AND(&taskdata->td_incomplete_child_tasks, ~PROXY_TASK_FLAG);
4489 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
4492 KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
4493 KMP_DEBUG_ASSERT(taskdata->td_flags.complete ==
4498 while ((KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks) &
4502 __kmp_release_deps(gtid, taskdata);
4503 __kmp_free_task_and_ancestors(gtid, taskdata, thread);
4516 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
4519 gtid, taskdata));
4521 KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
4523 __kmp_first_top_half_finish_proxy(taskdata);
4524 __kmp_second_top_half_finish_proxy(taskdata);
4529 gtid, taskdata));
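
Proxy-task completion (4456-4529) is split into halves so it can be triggered from outside the team: the first top half marks the task complete and ORs PROXY_TASK_FLAG into td_incomplete_child_tasks so the bottom half cannot free the task prematurely; the second top half notifies the parent and clears the bit; the bottom half (4498-4503) spins on that bit before releasing dependences and freeing. A sketch of the protocol (the flag value mirrors the runtime's marker bit, but treat it as illustrative):

    #include <atomic>

    constexpr unsigned PROXY_TASK_FLAG = 0x40000000u; // marker bit (illustrative)

    struct ProxyTask {
      std::atomic<unsigned> incomplete_children{0};   // td_incomplete_child_tasks
      std::atomic<int> *parent_incomplete = nullptr;  // parent's counter
    };

    // Fulfilling side, step 1: keep the bottom half from freeing too early.
    void first_top_half(ProxyTask *t) {
      t->incomplete_children.fetch_or(PROXY_TASK_FLAG);
    }

    // Fulfilling side, step 2: tell the parent, then clear the bit.
    void second_top_half(ProxyTask *t) {
      if (t->parent_incomplete)
        t->parent_incomplete->fetch_sub(1);
      t->incomplete_children.fetch_and(~PROXY_TASK_FLAG);
    }

    // Possibly another thread: wait for the bit, then freeing is safe.
    void bottom_half(ProxyTask *t) {
      while (t->incomplete_children.load(std::memory_order_acquire) &
             PROXY_TASK_FLAG) {
        // spin until the top halves are done touching the task
      }
    }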
4534 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
4538 kmp_team_t *team = taskdata->td_team;
4580 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
4585 taskdata));
4587 KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
4589 __kmp_first_top_half_finish_proxy(taskdata);
4593 __kmp_second_top_half_finish_proxy(taskdata);
4598 taskdata));
4615 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
4623 if (taskdata->td_flags.proxy == TASK_PROXY) {
4645 kmp_team_t *team = taskdata->td_team;
4659 // __kmp_task_dup_alloc: Allocate the taskdata and make a copy of source task
4673 kmp_taskdata_t *taskdata;
4690 taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, task_size);
4692 taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, task_size);
4694 KMP_MEMCPY(taskdata, taskdata_src, task_size);
4696 task = KMP_TASKDATA_TO_TASK(taskdata);
4700 if (!taskdata->is_taskgraph || taskloop_recur)
4701 taskdata->td_task_id = KMP_GEN_TASK_ID();
4702 else if (taskdata->is_taskgraph &&
4704 taskdata->td_task_id = KMP_ATOMIC_INC(&__kmp_tdg_task_id);
4706 taskdata->td_task_id = KMP_GEN_TASK_ID();
4710 task->shareds = &((char *)taskdata)[shareds_offset];
4714 taskdata->td_alloc_thread = thread;
4715 taskdata->td_parent = parent_task;
4717 taskdata->td_taskgroup = parent_task->td_taskgroup;
4720 if (taskdata->td_flags.tiedness == TASK_TIED)
4721 taskdata->td_last_tied = taskdata;
4725 if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser)) {
4731 if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT)
4732 KMP_ATOMIC_INC(&taskdata->td_parent->td_allocated_child_tasks);
4737 thread, taskdata, taskdata->td_parent));
4740 __ompt_task_init(taskdata, thread->th.th_info.ds.ds_gtid);
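
__kmp_task_dup_alloc (4673-4740) clones a task for taskloop by allocating a block of the same size, byte-copying it with KMP_MEMCPY, and then patching only what must differ: the task id, the parent, the shareds pointer into the new block, and the child-task reference counts. A reduced sketch with hypothetical fields:

    #include <cstdlib>
    #include <cstring>

    struct DupTaskData { unsigned id; DupTaskData *parent; char payload[64]; };

    DupTaskData *task_dup(const DupTaskData *src, DupTaskData *parent,
                          unsigned new_id) {
      std::size_t task_size = sizeof(DupTaskData);
      auto *copy = static_cast<DupTaskData *>(std::malloc(task_size));
      std::memcpy(copy, src, task_size);  // KMP_MEMCPY of the whole block
      copy->id = new_id;                  // fresh td_task_id
      copy->parent = parent;              // re-parent the copy
      // the real code also re-points task->shareds into the new block and
      // bumps the parent's allocated-child count
      return copy;
    }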
4758 const kmp_taskdata_t *taskdata;
4764 : task(_task), taskdata(KMP_TASK_TO_TASKDATA(task)),
4771 : task(_task), taskdata(KMP_TASK_TO_TASKDATA(_task)),
4779 if (!taskdata->td_flags.native) {
4783 if (taskdata->td_size_loop_bounds == 4) {
4792 (void)taskdata;
4801 if (!taskdata->td_flags.native) {
4805 if (taskdata->td_size_loop_bounds == 4) {
4821 if (!taskdata->td_flags.native) {
4825 if (taskdata->td_size_loop_bounds == 4) {
4840 if (!taskdata->td_flags.native) {
4844 if (taskdata->td_size_loop_bounds == 4) {
5031 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
5036 gtid, taskdata, num_tasks, grainsize, extras, last_chunk, *lb, *ub,
5087 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
5093 gtid, taskdata, num_tasks, grainsize, extras, last_chunk, *lb, *ub,
5152 thread->th.th_current_task = taskdata->td_parent;
5177 new_task_data->tdg = taskdata->tdg;
5211 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
5240 gtid, taskdata, lower, upper, st, grainsize, sched, modifier,
5321 // Also require GOMP_taskloop to reduce to linear (taskdata->td_flags.native)
5323 taskdata->td_flags.task_serial = 1;
5324 taskdata->td_flags.tiedness = TASK_TIED; // AC: serial task cannot be untied
5332 // !taskdata->td_flags.native => currently force linear spawning of tasks
5334 } else if (num_tasks > num_tasks_min && !taskdata->td_flags.native) {
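
The taskloop fragments at 5031-5093 carry the iteration-split arithmetic: the trip count is divided by the grainsize into num_tasks chunks, and the remainder becomes "extras" tasks that run one extra iteration each (modifier and last_chunk handling omitted here). A small worked example:

    #include <cstdio>

    int main() {
      unsigned long long tc = 1000;       // trip count
      unsigned long long grainsize = 33;
      unsigned long long num_tasks = tc / grainsize; // 30 tasks
      unsigned long long extras = tc % grainsize;    // 10 tasks get 34 iterations
      // first `extras` tasks run grainsize+1 iterations, the rest grainsize:
      // 10*34 + 20*33 = 1000, so every iteration is covered exactly once
      std::printf("%llu tasks: %llu x %llu iters, %llu x %llu iters\n",
                  num_tasks, extras, grainsize + 1, num_tasks - extras, grainsize);
    }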
5442 kmp_taskdata_t *taskdata = thread->th.th_current_task;
5444 if (!taskdata)
5447 return &taskdata->td_target_data.async_handle;
5463 kmp_taskdata_t *taskdata = thread->th.th_current_task;
5465 if (!taskdata)
5468 return taskdata->td_task_team != NULL;