Lines matching defs:bt — definitions and uses of the barrier-type parameter bt in the OpenMP runtime's barrier code. The leading number on each line is the source line number in that file.

210 void __kmp_dist_barrier_wakeup(enum barrier_type bt, kmp_team_t *team,
214 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done))
227 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
247 gtid, team->t.t_id, tid, bt));
372 gtid, team->t.t_id, tid, bt));
376 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
387 gtid, tid, bt));
389 thr_bar = &this_thr->th.th_bar[bt].bb;
414 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done))
423 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done))
429 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done))
461 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done))
472 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done))
503 __kmp_dist_barrier_wakeup(bt, team, tid + 1, group_end, 1, tid);
525 __kmp_dist_barrier_wakeup(bt, team, tid + b->threads_per_group, nproc,
543 __kmp_dist_barrier_wakeup(bt, team, tid + 1, group_end, 1, tid);
552 gtid, team->t.t_id, tid, bt));
558 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
562 kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
568 gtid, team->t.t_id, tid, bt));
594 kmp_balign_team_t *team_bar = &team->t.t_bar[bt];
605 KMP_CACHE_PREFETCH(&other_threads[i + 1]->th.th_bar[bt].bb.b_arrived);
611 &other_threads[i]->th.th_bar[bt].bb.b_arrived, new_state));
616 &other_threads[i]->th.th_bar[bt].bb.b_arrived, new_state);
620 kmp_flag_64<> flag(&other_threads[i]->th.th_bar[bt].bb.b_arrived,
654 gtid, team->t.t_id, tid, bt));
660 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
663 kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
677 gtid, team->t.t_id, tid, bt));
701 KMP_CACHE_PREFETCH(&other_threads[i + 1]->th.th_bar[bt].bb.b_go);
708 team->t.t_id, i, &other_threads[i]->th.th_bar[bt].bb.b_go,
709 other_threads[i]->th.th_bar[bt].bb.b_go,
710 other_threads[i]->th.th_bar[bt].bb.b_go + KMP_BARRIER_STATE_BUMP));
711 kmp_flag_64<> flag(&other_threads[i]->th.th_bar[bt].bb.b_go,
735 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done))
745 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done))
762 gtid, team->t.t_id, tid, bt));
767 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
770 bt, this_thr, gtid, tid, reduce USE_ITT_BUILD_ARG(itt_sync_obj));
774 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
777 bt, this_thr, gtid, tid, reduce USE_ITT_BUILD_ARG(itt_sync_obj));
781 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
784 bt, this_thr, gtid, tid, propagate_icvs USE_ITT_BUILD_ARG(itt_sync_obj));
788 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
791 bt, this_thr, gtid, tid, propagate_icvs USE_ITT_BUILD_ARG(itt_sync_obj));
796 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
800 kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
803 kmp_uint32 branch_bits = __kmp_barrier_gather_branch_bits[bt];
811 gtid, team->t.t_id, tid, bt));
826 new_state = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
830 kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
835 &other_threads[child_tid + 1]->th.th_bar[bt].bb.b_arrived);
888 team->t.t_bar[bt].b_arrived = new_state;
890 team->t.t_bar[bt].b_arrived += KMP_BARRIER_STATE_BUMP;
894 &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived));
898 gtid, team->t.t_id, tid, bt));
902 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
906 kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
908 kmp_uint32 branch_bits = __kmp_barrier_release_branch_bits[bt];
929 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done))
939 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done))
957 gtid, team->t.t_id, tid, bt));
968 kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
973 &other_threads[child_tid + 1]->th.th_bar[bt].bb.b_go);
1003 gtid, team->t.t_id, tid, bt));
1008 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
1012 kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
1016 kmp_uint32 branch_bits = __kmp_barrier_gather_branch_bits[bt];
1024 gtid, team->t.t_id, tid, bt));
1064 new_state = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
1069 kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
1075 &other_threads[next_child_tid]->th.th_bar[bt].bb.b_arrived);
1111 team->t.t_bar[bt].b_arrived += KMP_BARRIER_STATE_BUMP;
1113 team->t.t_bar[bt].b_arrived = new_state;
1117 &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived));
1121 gtid, team->t.t_id, tid, bt));
1127 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
1131 kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
1134 kmp_uint32 branch_bits = __kmp_barrier_release_branch_bits[bt];
1150 gtid, team->t.t_id, tid, bt));
1170 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done))
1180 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done))
1235 kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
1245 &other_threads[next_child_tid]->th.th_bar[bt].bb.b_go);
1278 gtid, team->t.t_id, tid, bt));
1291 static bool __kmp_init_hierarchical_barrier_thread(enum barrier_type bt,
1336 &team->t.t_threads[thr_bar->parent_tid]->th.th_bar[bt].bb;
1341 &team->t.t_threads[thr_bar->parent_tid]->th.th_bar[bt].bb;
1359 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
1363 kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
1380 gtid, team->t.t_id, tid, bt));
1390 (void)__kmp_init_hierarchical_barrier_thread(bt, thr_bar, nproc, gtid, tid,
1396 (kmp_uint64)team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
1404 : team->t.t_bar[bt].b_arrived | thr_bar->leaf_state;
1437 kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
1466 kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
1508 thr_bar->b_arrived = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
1515 team->t.t_bar[bt].b_arrived = new_state;
1519 &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived));
1524 gtid, team->t.t_id, tid, bt));
1528 enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
1532 kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
1541 gtid, team->t.t_id, tid, bt));
1558 thr_bar->offset + 1, bt,
1572 if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done))
1605 team_change = __kmp_init_hierarchical_barrier_thread(bt, thr_bar, nproc, gtid,
1656 &team->t.t_threads[child_tid]->th.th_bar[bt].bb;
1687 kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
1713 kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
1735 gtid, team->t.t_id, tid, bt));
1768 static int __kmp_barrier_template(enum barrier_type bt, int gtid, int is_split,
1794 barrier_kind = __ompt_get_barrier_kind(bt, this_thr);
1837 itt_sync_obj = __kmp_itt_barrier_object(gtid, bt, 1);
1869 team->t.t_bar[bt].b_master_arrived += 1;
1871 this_thr->th.th_bar[bt].bb.b_worker_arrived += 1;
1884 bt, this_thr, gtid, tid, reduce USE_ITT_BUILD_ARG(itt_sync_obj));
1886 switch (__kmp_barrier_gather_pattern[bt]) {
1888 __kmp_dist_barrier_gather(bt, this_thr, gtid, tid,
1894 KMP_ASSERT(__kmp_barrier_gather_branch_bits[bt]);
1895 __kmp_hyper_barrier_gather(bt, this_thr, gtid, tid,
1901 bt, this_thr, gtid, tid, reduce USE_ITT_BUILD_ARG(itt_sync_obj));
1906 KMP_ASSERT(__kmp_barrier_gather_branch_bits[bt]);
1907 __kmp_tree_barrier_gather(bt, this_thr, gtid, tid,
1912 __kmp_linear_barrier_gather(bt, this_thr, gtid, tid,
1928 team->t.t_bar[bt].b_team_arrived += 1;
2002 bt, this_thr, gtid, tid, FALSE USE_ITT_BUILD_ARG(itt_sync_obj));
2004 switch (__kmp_barrier_release_pattern[bt]) {
2006 KMP_ASSERT(__kmp_barrier_release_branch_bits[bt]);
2007 __kmp_dist_barrier_release(bt, this_thr, gtid, tid,
2012 KMP_ASSERT(__kmp_barrier_release_branch_bits[bt]);
2013 __kmp_hyper_barrier_release(bt, this_thr, gtid, tid,
2019 bt, this_thr, gtid, tid, FALSE USE_ITT_BUILD_ARG(itt_sync_obj));
2023 KMP_ASSERT(__kmp_barrier_release_branch_bits[bt]);
2024 __kmp_tree_barrier_release(bt, this_thr, gtid, tid,
2029 __kmp_linear_barrier_release(bt, this_thr, gtid, tid,
2053 itt_sync_obj = __kmp_itt_barrier_object(gtid, bt, 1);
2100 int __kmp_barrier(enum barrier_type bt, int gtid, int is_split,
2103 return __kmp_barrier_template<>(bt, gtid, is_split, reduce_size, reduce_data,
2131 void __kmp_end_split_barrier(enum barrier_type bt, int gtid) {
2134 KMP_DEBUG_ASSERT(bt < bs_last_barrier);
2141 switch (__kmp_barrier_release_pattern[bt]) {
2143 __kmp_dist_barrier_release(bt, this_thr, gtid, tid,
2148 KMP_ASSERT(__kmp_barrier_release_branch_bits[bt]);
2149 __kmp_hyper_barrier_release(bt, this_thr, gtid, tid,
2154 __kmp_hierarchical_barrier_release(bt, this_thr, gtid, tid,
2159 KMP_ASSERT(__kmp_barrier_release_branch_bits[bt]);
2160 __kmp_tree_barrier_release(bt, this_thr, gtid, tid,
2165 __kmp_linear_barrier_release(bt, this_thr, gtid, tid,
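
Read together, the matches trace one consistent pattern: every barrier algorithm takes bt as a parameter, uses it to index per-barrier-type state (th_bar[bt].bb, t_bar[bt], __kmp_barrier_gather_pattern[bt], __kmp_barrier_release_pattern[bt]) and to gate the shutdown check (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done)), while __kmp_barrier_template and __kmp_end_split_barrier switch on the per-type pattern to pick the dist, hyper, hierarchical, tree, or linear implementation. Below is a minimal, self-contained C++ sketch of that dispatch shape. It is an illustration only: the enums are simplified (the real barrier_type in kmp.h also has a conditionally compiled bs_reduction_barrier member), and the gather_pattern array and *_gather functions here are hypothetical stand-ins for the runtime's internals, not its actual definitions.

#include <cstdio>

// Simplified stand-ins (assumption: the real definitions live in kmp.h).
enum barrier_type { bs_plain_barrier = 0, bs_forkjoin_barrier, bs_last_barrier };
enum barrier_pattern { bp_linear_bar, bp_tree_bar, bp_hyper_bar, bp_dist_bar };

// One pattern choice per barrier type, mirroring the role of
// __kmp_barrier_gather_pattern[bt] in the listing above.
static barrier_pattern gather_pattern[bs_last_barrier] = {bp_hyper_bar,
                                                          bp_hyper_bar};

// Hypothetical per-algorithm workers; each receives bt so it can index
// its own slice of per-barrier-type state, as the real gathers do.
static void linear_gather(barrier_type bt) { std::printf("linear, bt=%d\n", bt); }
static void tree_gather(barrier_type bt) { std::printf("tree, bt=%d\n", bt); }
static void hyper_gather(barrier_type bt) { std::printf("hyper, bt=%d\n", bt); }
static void dist_gather(barrier_type bt) { std::printf("dist, bt=%d\n", bt); }

// Dispatch shape of the switch in __kmp_barrier_template: select the
// gather algorithm configured for this barrier type, defaulting to linear.
static void barrier_gather(barrier_type bt) {
  switch (gather_pattern[bt]) {
  case bp_dist_bar:
    dist_gather(bt);
    break;
  case bp_hyper_bar:
    hyper_gather(bt);
    break;
  case bp_tree_bar:
    tree_gather(bt);
    break;
  default:
    linear_gather(bt);
  }
}

int main() {
  barrier_gather(bs_plain_barrier);    // plain worksharing barrier
  barrier_gather(bs_forkjoin_barrier); // fork/join barrier
}

The point of indexing everything by bt, as the matched lines suggest, is isolation: a plain barrier and a fork/join barrier in flight at the same time keep separate b_arrived/b_go counters and can even use different gather/release algorithms, since each barrier type has its own entry in the pattern and state arrays.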