2 * kmp_lock.cpp -- lock-related functions
5 //===----------------------------------------------------------------------===//
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
11 //===----------------------------------------------------------------------===//
48 x = ~((kmp_uint32)0) - 2; in __kmp_validate_locks()
49 y = x - 2; in __kmp_validate_locks()
52 kmp_uint32 z = (x - y); in __kmp_validate_locks()
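The arithmetic above checks that unsigned 32-bit subtraction keeps giving the correct small distance even while the operands wrap past UINT_MAX, a property the ticket-style locks later in the file rely on. A standalone restatement of that property (a sketch, not the library's check):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t x = ~uint32_t(0) - 2; // 0xFFFFFFFD
  uint32_t y = x - 2;            // 0xFFFFFFFB
  for (int i = 0; i < 8; ++i, ++x, ++y) {
    // x wraps past 0xFFFFFFFF to 0, 1, ... but x - y stays 2 (mod 2^32).
    assert(x - y == 2u);
  }
  return 0;
}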
59 /* ------------------------------------------------------------------------ */
62 // For the non-nested locks, we can only assume that the first 4 bytes were
64 // compiler only allocates a 4 byte pointer on IA-32 architecture. On
68 // entire 8 bytes were allocated for nested locks on all 64-bit platforms.
71 return KMP_LOCK_STRIP(KMP_ATOMIC_LD_RLX(&lck->lk.poll)) - 1; in __kmp_get_tas_lock_owner()
75 return lck->lk.depth_locked != -1; in __kmp_is_tas_lock_nestable()
83 kmp_uint32 curr = KMP_LOCK_STRIP(lck->lk.poll); in __kmp_acquire_tas_lock_timed_template()
92 if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == tas_free && in __kmp_acquire_tas_lock_timed_template()
93 __kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)) { in __kmp_acquire_tas_lock_timed_template()
112 } while (KMP_ATOMIC_LD_RLX(&lck->lk.poll) != tas_free || in __kmp_acquire_tas_lock_timed_template()
113 !__kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)); in __kmp_acquire_tas_lock_timed_template()
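The acquire path above is a test-and-test-and-set: a relaxed read of lk.poll filters out obviously held locks before attempting the acquiring compare-and-store. A self-contained sketch of that shape with plain std::atomic (the names and the simple 0/1 encoding are mine; the real lock stores gtid + 1 in the poll word):

#include <atomic>

struct tas_lock {
  std::atomic<int> poll{0};          // 0 = free, nonzero = busy

  bool try_lock() {
    int expected = 0;
    // Read first so contended threads spin on a cached value instead of
    // hammering the cache line with failed compare-exchanges.
    return poll.load(std::memory_order_relaxed) == 0 &&
           poll.compare_exchange_strong(expected, 1, std::memory_order_acquire);
  }

  void lock() {
    while (!try_lock()) {
      // back off / yield in a real implementation
    }
  }

  void unlock() { poll.store(0, std::memory_order_release); }
};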
139 if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == tas_free && in __kmp_test_tas_lock()
140 __kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)) { in __kmp_test_tas_lock()
161 KMP_ATOMIC_ST_REL(&lck->lk.poll, KMP_LOCK_FREE(tas)); in __kmp_release_tas_lock()
176 if (__kmp_get_tas_lock_owner(lck) == -1) { in __kmp_release_tas_lock_with_checks()
187 lck->lk.poll = KMP_LOCK_FREE(tas); in __kmp_init_tas_lock()
190 void __kmp_destroy_tas_lock(kmp_tas_lock_t *lck) { lck->lk.poll = 0; } in __kmp_destroy_tas_lock()
198 if (__kmp_get_tas_lock_owner(lck) != -1) { in __kmp_destroy_tas_lock_with_checks()
210 lck->lk.depth_locked += 1; in __kmp_acquire_nested_tas_lock()
214 lck->lk.depth_locked = 1; in __kmp_acquire_nested_tas_lock()
234 retval = ++lck->lk.depth_locked; in __kmp_test_nested_tas_lock()
239 retval = lck->lk.depth_locked = 1; in __kmp_test_nested_tas_lock()
257 if (--(lck->lk.depth_locked) == 0) { in __kmp_release_nested_tas_lock()
271 if (__kmp_get_tas_lock_owner(lck) == -1) { in __kmp_release_nested_tas_lock_with_checks()
282 lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks in __kmp_init_nested_tas_lock()
287 lck->lk.depth_locked = 0; in __kmp_destroy_nested_tas_lock()
295 if (__kmp_get_tas_lock_owner(lck) != -1) { in __kmp_destroy_nested_tas_lock_with_checks()
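All of the *_nested_* variants in this file follow the same scheme: if the calling thread already owns the lock, just bump depth_locked; otherwise take the underlying lock and set the depth to 1, and a release only drops the lock when the depth reaches zero. A compact sketch of that scheme over a simple owner word (names are mine, not the kmp types):

#include <atomic>

struct nested_lock {
  std::atomic<int> owner{-1};   // gtid of the owner, -1 when free
  int depth = 0;                // only touched by the owning thread

  void acquire(int gtid) {
    if (owner.load(std::memory_order_relaxed) == gtid) {
      ++depth;                  // re-entrant acquire by the owner
      return;
    }
    int expected = -1;
    while (!owner.compare_exchange_weak(expected, gtid,
                                        std::memory_order_acquire))
      expected = -1;            // spin until the lock is free
    depth = 1;
  }

  // Returns the remaining depth; 0 means the lock was actually released.
  int release() {
    int remaining = --depth;
    if (remaining == 0)
      owner.store(-1, std::memory_order_release);
    return remaining;
  }
};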
303 /* ------------------------------------------------------------------------ */
309 // the compiler for non-nested locks / allocate nested locks on the heap).
312 return KMP_LOCK_STRIP((TCR_4(lck->lk.poll) >> 1)) - 1; in __kmp_get_futex_lock_owner()
316 return lck->lk.depth_locked != -1; in __kmp_is_futex_lock_nestable()
326 kmp_uint32 curr = KMP_LOCK_STRIP(TCR_4(lck->lk.poll)); in __kmp_acquire_futex_lock_timed_template()
334 lck, lck->lk.poll, gtid)); in __kmp_acquire_futex_lock_timed_template()
339 &(lck->lk.poll), KMP_LOCK_FREE(futex), in __kmp_acquire_futex_lock_timed_template()
357 if (!KMP_COMPARE_AND_STORE_REL32(&(lck->lk.poll), poll_val, in __kmp_acquire_futex_lock_timed_template()
362 lck, lck->lk.poll, gtid)); in __kmp_acquire_futex_lock_timed_template()
369 lck->lk.poll, gtid)); in __kmp_acquire_futex_lock_timed_template()
378 if ((rc = syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAIT, poll_val, NULL, in __kmp_acquire_futex_lock_timed_template()
397 lck->lk.poll, gtid)); in __kmp_acquire_futex_lock_timed_template()
420 if (KMP_COMPARE_AND_STORE_ACQ32(&(lck->lk.poll), KMP_LOCK_FREE(futex), in __kmp_test_futex_lock()
442 lck, lck->lk.poll, gtid)); in __kmp_release_futex_lock()
446 kmp_int32 poll_val = KMP_XCHG_FIXED32(&(lck->lk.poll), KMP_LOCK_FREE(futex)); in __kmp_release_futex_lock()
456 syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex), in __kmp_release_futex_lock()
463 lck->lk.poll, gtid)); in __kmp_release_futex_lock()
477 if (__kmp_get_futex_lock_owner(lck) == -1) { in __kmp_release_futex_lock_with_checks()
488 TCW_4(lck->lk.poll, KMP_LOCK_FREE(futex)); in __kmp_init_futex_lock()
491 void __kmp_destroy_futex_lock(kmp_futex_lock_t *lck) { lck->lk.poll = 0; } in __kmp_destroy_futex_lock()
499 if (__kmp_get_futex_lock_owner(lck) != -1) { in __kmp_destroy_futex_lock_with_checks()
511 lck->lk.depth_locked += 1; in __kmp_acquire_nested_futex_lock()
515 lck->lk.depth_locked = 1; in __kmp_acquire_nested_futex_lock()
535 retval = ++lck->lk.depth_locked; in __kmp_test_nested_futex_lock()
540 retval = lck->lk.depth_locked = 1; in __kmp_test_nested_futex_lock()
558 if (--(lck->lk.depth_locked) == 0) { in __kmp_release_nested_futex_lock()
572 if (__kmp_get_futex_lock_owner(lck) == -1) { in __kmp_release_nested_futex_lock_with_checks()
583 lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks in __kmp_init_nested_futex_lock()
588 lck->lk.depth_locked = 0; in __kmp_destroy_nested_futex_lock()
596 if (__kmp_get_futex_lock_owner(lck) != -1) { in __kmp_destroy_nested_futex_lock_with_checks()
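The futex lock above packs (gtid + 1) << 1 into lk.poll, uses the low bit to flag waiters, and relies on the futex syscall to sleep waiters and wake them on release. The sketch below shows the same sleep/wake mechanism with the simpler classic 0/1/2 state encoding rather than the KMP one (Linux only; names and encoding are mine):

#include <atomic>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

static long futex_op(std::atomic<int> *addr, int op, int val) {
  return syscall(SYS_futex, reinterpret_cast<int *>(addr), op, val, nullptr,
                 nullptr, 0);
}

struct futex_mutex {
  std::atomic<int> state{0};   // 0 = free, 1 = locked, 2 = locked w/ waiters

  void lock() {
    int c = 0;
    if (state.compare_exchange_strong(c, 1, std::memory_order_acquire))
      return;                                  // uncontended fast path: 0 -> 1
    if (c != 2)
      c = state.exchange(2, std::memory_order_acquire);
    while (c != 0) {                           // lock still held by someone
      futex_op(&state, FUTEX_WAIT, 2);         // sleep while state == 2
      c = state.exchange(2, std::memory_order_acquire);
    }
    // c == 0: the exchange found the lock free and left it marked contended.
  }

  void unlock() {
    if (state.exchange(0, std::memory_order_release) == 2)
      futex_op(&state, FUTEX_WAKE, 1);         // someone may be asleep: wake one
  }
};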
604 /* ------------------------------------------------------------------------ */
608 return std::atomic_load_explicit(&lck->lk.owner_id, in __kmp_get_ticket_lock_owner()
609 std::memory_order_relaxed) - in __kmp_get_ticket_lock_owner()
614 return std::atomic_load_explicit(&lck->lk.depth_locked, in __kmp_is_ticket_lock_nestable()
615 std::memory_order_relaxed) != -1; in __kmp_is_ticket_lock_nestable()
627 &lck->lk.next_ticket, 1U, std::memory_order_relaxed); in __kmp_acquire_ticket_lock_timed_template()
630 if (std::atomic_load_explicit(&lck->lk.now_serving, in __kmp_acquire_ticket_lock_timed_template()
636 if (std::atomic_load_explicit(&lck->lk.now_serving, in __kmp_acquire_ticket_lock_timed_template()
640 KMP_WAIT_PTR(&lck->lk.now_serving, my_ticket, __kmp_bakery_check, lck); in __kmp_acquire_ticket_lock_timed_template()
653 if (!std::atomic_load_explicit(&lck->lk.initialized, in __kmp_acquire_ticket_lock_with_checks()
657 if (lck->lk.self != lck) { in __kmp_acquire_ticket_lock_with_checks()
669 std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1, in __kmp_acquire_ticket_lock_with_checks()
675 kmp_uint32 my_ticket = std::atomic_load_explicit(&lck->lk.next_ticket, in __kmp_test_ticket_lock()
678 if (std::atomic_load_explicit(&lck->lk.now_serving, in __kmp_test_ticket_lock()
682 &lck->lk.next_ticket, &my_ticket, next_ticket, in __kmp_test_ticket_lock()
694 if (!std::atomic_load_explicit(&lck->lk.initialized, in __kmp_test_ticket_lock_with_checks()
698 if (lck->lk.self != lck) { in __kmp_test_ticket_lock_with_checks()
708 std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1, in __kmp_test_ticket_lock_with_checks()
715 kmp_uint32 distance = std::atomic_load_explicit(&lck->lk.next_ticket, in __kmp_release_ticket_lock()
716 std::memory_order_relaxed) - in __kmp_release_ticket_lock()
717 std::atomic_load_explicit(&lck->lk.now_serving, in __kmp_release_ticket_lock()
720 std::atomic_fetch_add_explicit(&lck->lk.now_serving, 1U, in __kmp_release_ticket_lock()
732 if (!std::atomic_load_explicit(&lck->lk.initialized, in __kmp_release_ticket_lock_with_checks()
736 if (lck->lk.self != lck) { in __kmp_release_ticket_lock_with_checks()
742 if (__kmp_get_ticket_lock_owner(lck) == -1) { in __kmp_release_ticket_lock_with_checks()
749 std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed); in __kmp_release_ticket_lock_with_checks()
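The ticket lock takes a number with a relaxed fetch_add on next_ticket, spins until now_serving catches up, and release simply bumps now_serving; unsigned wraparound is harmless, which is what __kmp_validate_locks verifies. A minimal std::atomic version of the same algorithm (a sketch, not the kmp_ticket_lock_t layout):

#include <atomic>

struct ticket_lock {
  std::atomic<unsigned> next_ticket{0};
  std::atomic<unsigned> now_serving{0};

  void lock() {
    // Take a ticket with relaxed ordering; the acquire is on the load of
    // now_serving that finally observes our turn.
    unsigned my = next_ticket.fetch_add(1, std::memory_order_relaxed);
    while (now_serving.load(std::memory_order_acquire) != my) {
      // spin (a real implementation would pause/yield here)
    }
  }

  void unlock() {
    // Hand the critical section to the next waiter.
    now_serving.fetch_add(1, std::memory_order_release);
  }
};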
754 lck->lk.location = NULL; in __kmp_init_ticket_lock()
755 lck->lk.self = lck; in __kmp_init_ticket_lock()
756 std::atomic_store_explicit(&lck->lk.next_ticket, 0U, in __kmp_init_ticket_lock()
758 std::atomic_store_explicit(&lck->lk.now_serving, 0U, in __kmp_init_ticket_lock()
761 &lck->lk.owner_id, 0, in __kmp_init_ticket_lock()
764 &lck->lk.depth_locked, -1, in __kmp_init_ticket_lock()
765 std::memory_order_relaxed); // -1 => not a nested lock. in __kmp_init_ticket_lock()
766 std::atomic_store_explicit(&lck->lk.initialized, true, in __kmp_init_ticket_lock()
771 std::atomic_store_explicit(&lck->lk.initialized, false, in __kmp_destroy_ticket_lock()
773 lck->lk.self = NULL; in __kmp_destroy_ticket_lock()
774 lck->lk.location = NULL; in __kmp_destroy_ticket_lock()
775 std::atomic_store_explicit(&lck->lk.next_ticket, 0U, in __kmp_destroy_ticket_lock()
777 std::atomic_store_explicit(&lck->lk.now_serving, 0U, in __kmp_destroy_ticket_lock()
779 std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed); in __kmp_destroy_ticket_lock()
780 std::atomic_store_explicit(&lck->lk.depth_locked, -1, in __kmp_destroy_ticket_lock()
787 if (!std::atomic_load_explicit(&lck->lk.initialized, in __kmp_destroy_ticket_lock_with_checks()
791 if (lck->lk.self != lck) { in __kmp_destroy_ticket_lock_with_checks()
797 if (__kmp_get_ticket_lock_owner(lck) != -1) { in __kmp_destroy_ticket_lock_with_checks()
809 std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1, in __kmp_acquire_nested_ticket_lock()
814 std::atomic_store_explicit(&lck->lk.depth_locked, 1, in __kmp_acquire_nested_ticket_lock()
816 std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1, in __kmp_acquire_nested_ticket_lock()
826 if (!std::atomic_load_explicit(&lck->lk.initialized, in __kmp_acquire_nested_ticket_lock_with_checks()
830 if (lck->lk.self != lck) { in __kmp_acquire_nested_ticket_lock_with_checks()
845 retval = std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1, in __kmp_test_nested_ticket_lock()
851 std::atomic_store_explicit(&lck->lk.depth_locked, 1, in __kmp_test_nested_ticket_lock()
853 std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1, in __kmp_test_nested_ticket_lock()
864 if (!std::atomic_load_explicit(&lck->lk.initialized, in __kmp_test_nested_ticket_lock_with_checks()
868 if (lck->lk.self != lck) { in __kmp_test_nested_ticket_lock_with_checks()
880 if ((std::atomic_fetch_add_explicit(&lck->lk.depth_locked, -1, in __kmp_release_nested_ticket_lock()
881 std::memory_order_relaxed) - in __kmp_release_nested_ticket_lock()
883 std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed); in __kmp_release_nested_ticket_lock()
894 if (!std::atomic_load_explicit(&lck->lk.initialized, in __kmp_release_nested_ticket_lock_with_checks()
898 if (lck->lk.self != lck) { in __kmp_release_nested_ticket_lock_with_checks()
904 if (__kmp_get_ticket_lock_owner(lck) == -1) { in __kmp_release_nested_ticket_lock_with_checks()
915 std::atomic_store_explicit(&lck->lk.depth_locked, 0, in __kmp_init_nested_ticket_lock()
917 // >= 0 for nestable locks, -1 for simple locks in __kmp_init_nested_ticket_lock()
922 std::atomic_store_explicit(&lck->lk.depth_locked, 0, in __kmp_destroy_nested_ticket_lock()
930 if (!std::atomic_load_explicit(&lck->lk.initialized, in __kmp_destroy_nested_ticket_lock_with_checks()
934 if (lck->lk.self != lck) { in __kmp_destroy_nested_ticket_lock_with_checks()
940 if (__kmp_get_ticket_lock_owner(lck) != -1) { in __kmp_destroy_nested_ticket_lock_with_checks()
949 return lck->lk.location; in __kmp_get_ticket_lock_location()
954 lck->lk.location = loc; in __kmp_set_ticket_lock_location()
958 return lck->lk.flags; in __kmp_get_ticket_lock_flags()
963 lck->lk.flags = flags; in __kmp_set_ticket_lock_flags()
966 /* ------------------------------------------------------------------------ */
/* Queuing lock states, written as (head, tail):
     0, 0                 lock is free, nobody on queue
     UINT_MAX or -1, 0    lock is held, nobody on queue
     h, h   (h > 0)       lock is held (or in transition), one thread queued
     h, t   (h != t > 0)  lock is held (or in transition), several threads queued

   Transitions:
     Acquire( 0, 0) = -1, 0
     Acquire(-1, 0) =  h, h        h > 0
     Release(-1, 0) =  0, 0
     Release( h, h) = -1, 0        h > 0
     Release( 0, 0) =  Error

   Pictorially, acquire steps down the chain
     (0, 0) -> (-1, 0) -> (h, h) -> (h, t)
   and release steps back up; (h, t) loops back on itself for further
   acquires and releases while the queue stays non-empty. */
1053 gtid + 1, this_thr->th.th_spin_here, in __kmp_dump_queuing_lock()
1054 this_thr->th.th_next_waiting, head_id, tail_id); in __kmp_dump_queuing_lock()
1056 __kmp_printf_no_lock("\t\thead: %d ", lck->lk.head_id); in __kmp_dump_queuing_lock()
1058 if (lck->lk.head_id >= 1) { in __kmp_dump_queuing_lock()
1059 t = __kmp_threads[lck->lk.head_id - 1]->th.th_next_waiting; in __kmp_dump_queuing_lock()
1061 __kmp_printf_no_lock("-> %d ", t); in __kmp_dump_queuing_lock()
1062 t = __kmp_threads[t - 1]->th.th_next_waiting; in __kmp_dump_queuing_lock()
1065 __kmp_printf_no_lock("; tail: %d ", lck->lk.tail_id); in __kmp_dump_queuing_lock()
1072 return TCR_4(lck->lk.owner_id) - 1; in __kmp_get_queuing_lock_owner()
1076 return lck->lk.depth_locked != -1; in __kmp_is_queuing_lock_nestable()
1087 volatile kmp_int32 *head_id_p = &lck->lk.head_id; in __kmp_acquire_queuing_lock_timed_template()
1088 volatile kmp_int32 *tail_id_p = &lck->lk.tail_id; in __kmp_acquire_queuing_lock_timed_template()
1100 spin_here_p = &this_thr->th.th_spin_here; in __kmp_acquire_queuing_lock_timed_template()
1106 if (this_thr->th.th_next_waiting != 0) in __kmp_acquire_queuing_lock_timed_template()
1110 KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0); in __kmp_acquire_queuing_lock_timed_template()
1130 case -1: { in __kmp_acquire_queuing_lock_timed_template()
1140 /* try (-1,0)->(tid,tid) */ in __kmp_acquire_queuing_lock_timed_template()
1142 KMP_PACK_64(-1, 0), in __kmp_acquire_queuing_lock_timed_template()
1146 TRACE_LOCK(gtid + 1, "acq enq: (-1,0)->(tid,tid)"); in __kmp_acquire_queuing_lock_timed_template()
1161 /* try (h,t) or (h,h)->(h,tid) */ in __kmp_acquire_queuing_lock_timed_template()
1166 TRACE_LOCK(gtid + 1, "acq enq: (h,t)->(h,tid)"); in __kmp_acquire_queuing_lock_timed_template()
1179 /* try (0,0)->(-1,0) */ in __kmp_acquire_queuing_lock_timed_template()
1181 /* only legal transition out of head = 0 is head = -1 with no change to in __kmp_acquire_queuing_lock_timed_template()
1183 grabbed_lock = KMP_COMPARE_AND_STORE_ACQ32(head_id_p, 0, -1); in __kmp_acquire_queuing_lock_timed_template()
1200 this_thr->th.ompt_thread_info.state = prev_state; in __kmp_acquire_queuing_lock_timed_template()
1201 this_thr->th.ompt_thread_info.wait_id = 0; in __kmp_acquire_queuing_lock_timed_template()
1215 prev_state = this_thr->th.ompt_thread_info.state; in __kmp_acquire_queuing_lock_timed_template()
1216 this_thr->th.ompt_thread_info.wait_id = (uint64_t)lck; in __kmp_acquire_queuing_lock_timed_template()
1217 this_thr->th.ompt_thread_info.state = ompt_state_wait_lock; in __kmp_acquire_queuing_lock_timed_template()
1223 kmp_info_t *tail_thr = __kmp_thread_from_gtid(tail - 1); in __kmp_acquire_queuing_lock_timed_template()
1225 tail_thr->th.th_next_waiting = gtid + 1; in __kmp_acquire_queuing_lock_timed_template()
1242 if (this_thr->th.th_next_waiting != 0) in __kmp_acquire_queuing_lock_timed_template()
1245 KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0); in __kmp_acquire_queuing_lock_timed_template()
1256 this_thr->th.ompt_thread_info.state = prev_state; in __kmp_acquire_queuing_lock_timed_template()
1257 this_thr->th.ompt_thread_info.wait_id = 0; in __kmp_acquire_queuing_lock_timed_template()
1287 if (lck->lk.initialized != lck) { in __kmp_acquire_queuing_lock_with_checks()
1299 lck->lk.owner_id = gtid + 1; in __kmp_acquire_queuing_lock_with_checks()
1304 volatile kmp_int32 *head_id_p = &lck->lk.head_id; in __kmp_test_queuing_lock()
1315 KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here); in __kmp_test_queuing_lock()
1321 /* try (0,0)->(-1,0) */ in __kmp_test_queuing_lock()
1322 if (KMP_COMPARE_AND_STORE_ACQ32(head_id_p, 0, -1)) { in __kmp_test_queuing_lock()
1338 if (lck->lk.initialized != lck) { in __kmp_test_queuing_lock_with_checks()
1348 lck->lk.owner_id = gtid + 1; in __kmp_test_queuing_lock_with_checks()
1354 volatile kmp_int32 *head_id_p = &lck->lk.head_id; in __kmp_release_queuing_lock()
1355 volatile kmp_int32 *tail_id_p = &lck->lk.tail_id; in __kmp_release_queuing_lock()
1367 if (this_thr->th.th_spin_here) in __kmp_release_queuing_lock()
1369 if (this_thr->th.th_next_waiting != 0) in __kmp_release_queuing_lock()
1372 KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here); in __kmp_release_queuing_lock()
1373 KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0); in __kmp_release_queuing_lock()
1391 0); /* holding the lock, head must be -1 or queue head */ in __kmp_release_queuing_lock()
1393 if (head == -1) { /* nobody on queue */ in __kmp_release_queuing_lock()
1394 /* try (-1,0)->(0,0) */ in __kmp_release_queuing_lock()
1395 if (KMP_COMPARE_AND_STORE_REL32(head_id_p, -1, 0)) { in __kmp_release_queuing_lock()
1405 /* nothing to do - no other thread is trying to shift blame */ in __kmp_release_queuing_lock()
1420 /* try (h,h)->(-1,0) */ in __kmp_release_queuing_lock()
1423 KMP_PACK_64(-1, 0)); in __kmp_release_queuing_lock()
1425 TRACE_LOCK(gtid + 1, "rel deq: (h,h)->(-1,0)"); in __kmp_release_queuing_lock()
1430 kmp_info_t *head_thr = __kmp_thread_from_gtid(head - 1); in __kmp_release_queuing_lock()
1432 waiting_id_p = &head_thr->th.th_next_waiting; in __kmp_release_queuing_lock()
1441 /* try (h,t)->(h',t) or (t,t) */ in __kmp_release_queuing_lock()
1448 TRACE_LOCK(gtid + 1, "rel deq: (h,t)->(h',t)"); in __kmp_release_queuing_lock()
1455 kmp_info_t *head_thr = __kmp_thread_from_gtid(head - 1); in __kmp_release_queuing_lock()
1467 head_thr->th.th_next_waiting = 0; in __kmp_release_queuing_lock()
1474 head_thr->th.th_spin_here = FALSE; in __kmp_release_queuing_lock()
1500 if (lck->lk.initialized != lck) { in __kmp_release_queuing_lock_with_checks()
1506 if (__kmp_get_queuing_lock_owner(lck) == -1) { in __kmp_release_queuing_lock_with_checks()
1512 lck->lk.owner_id = 0; in __kmp_release_queuing_lock_with_checks()
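The queuing lock chains waiting threads through th_next_waiting and lets each waiter spin only on its own th_spin_here flag, so a handoff touches one thread's cache line rather than a shared word. The classic MCS lock below shows that local-spinning idea in a self-contained form; it is not the KMP protocol, which keeps (head, tail) gtid indices inside the lock instead of per-thread node pointers:

#include <atomic>

struct mcs_node {
  std::atomic<mcs_node *> next{nullptr};
  std::atomic<bool> locked{false};
};

struct mcs_lock {
  std::atomic<mcs_node *> tail{nullptr};

  void lock(mcs_node &me) {
    me.next.store(nullptr, std::memory_order_relaxed);
    me.locked.store(true, std::memory_order_relaxed);
    mcs_node *prev = tail.exchange(&me, std::memory_order_acq_rel);
    if (prev == nullptr)
      return;                                    // queue was empty: lock is ours
    prev->next.store(&me, std::memory_order_release); // link behind predecessor
    while (me.locked.load(std::memory_order_acquire)) {
      // spin on our own flag, not on the shared lock word
    }
  }

  void unlock(mcs_node &me) {
    mcs_node *succ = me.next.load(std::memory_order_acquire);
    if (succ == nullptr) {
      mcs_node *expected = &me;
      if (tail.compare_exchange_strong(expected, nullptr,
                                       std::memory_order_acq_rel))
        return;                                  // no successor: queue emptied
      while ((succ = me.next.load(std::memory_order_acquire)) == nullptr) {
        // successor is still linking itself in; wait for the pointer
      }
    }
    succ->locked.store(false, std::memory_order_release); // hand off the lock
  }
};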
1517 lck->lk.location = NULL; in __kmp_init_queuing_lock()
1518 lck->lk.head_id = 0; in __kmp_init_queuing_lock()
1519 lck->lk.tail_id = 0; in __kmp_init_queuing_lock()
1520 lck->lk.next_ticket = 0; in __kmp_init_queuing_lock()
1521 lck->lk.now_serving = 0; in __kmp_init_queuing_lock()
1522 lck->lk.owner_id = 0; // no thread owns the lock. in __kmp_init_queuing_lock()
1523 lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks. in __kmp_init_queuing_lock()
1524 lck->lk.initialized = lck; in __kmp_init_queuing_lock()
1530 lck->lk.initialized = NULL; in __kmp_destroy_queuing_lock()
1531 lck->lk.location = NULL; in __kmp_destroy_queuing_lock()
1532 lck->lk.head_id = 0; in __kmp_destroy_queuing_lock()
1533 lck->lk.tail_id = 0; in __kmp_destroy_queuing_lock()
1534 lck->lk.next_ticket = 0; in __kmp_destroy_queuing_lock()
1535 lck->lk.now_serving = 0; in __kmp_destroy_queuing_lock()
1536 lck->lk.owner_id = 0; in __kmp_destroy_queuing_lock()
1537 lck->lk.depth_locked = -1; in __kmp_destroy_queuing_lock()
1542 if (lck->lk.initialized != lck) { in __kmp_destroy_queuing_lock_with_checks()
1548 if (__kmp_get_queuing_lock_owner(lck) != -1) { in __kmp_destroy_queuing_lock_with_checks()
1560 lck->lk.depth_locked += 1; in __kmp_acquire_nested_queuing_lock()
1565 lck->lk.depth_locked = 1; in __kmp_acquire_nested_queuing_lock()
1567 lck->lk.owner_id = gtid + 1; in __kmp_acquire_nested_queuing_lock()
1576 if (lck->lk.initialized != lck) { in __kmp_acquire_nested_queuing_lock_with_checks()
1591 retval = ++lck->lk.depth_locked; in __kmp_test_nested_queuing_lock()
1596 retval = lck->lk.depth_locked = 1; in __kmp_test_nested_queuing_lock()
1598 lck->lk.owner_id = gtid + 1; in __kmp_test_nested_queuing_lock()
1606 if (lck->lk.initialized != lck) { in __kmp_test_nested_queuing_lock_with_checks()
1619 if (--(lck->lk.depth_locked) == 0) { in __kmp_release_nested_queuing_lock()
1621 lck->lk.owner_id = 0; in __kmp_release_nested_queuing_lock()
1633 if (lck->lk.initialized != lck) { in __kmp_release_nested_queuing_lock_with_checks()
1639 if (__kmp_get_queuing_lock_owner(lck) == -1) { in __kmp_release_nested_queuing_lock_with_checks()
1650 lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks in __kmp_init_nested_queuing_lock()
1655 lck->lk.depth_locked = 0; in __kmp_destroy_nested_queuing_lock()
1661 if (lck->lk.initialized != lck) { in __kmp_destroy_nested_queuing_lock_with_checks()
1667 if (__kmp_get_queuing_lock_owner(lck) != -1) { in __kmp_destroy_nested_queuing_lock_with_checks()
1676 return lck->lk.location; in __kmp_get_queuing_lock_location()
1681 lck->lk.location = loc; in __kmp_set_queuing_lock_location()
1685 return lck->lk.flags; in __kmp_get_queuing_lock_flags()
1690 lck->lk.flags = flags; in __kmp_set_queuing_lock_flags()
1720 /*A version of XBegin which returns -1 on speculation, and the value of EAX on
1724 int res = -1; in _xbegin()
1758 'res=-1' may be dropped as being dead, whereas we do need the assignment on in _xbegin()
1759 the successful (i.e., non-abort) path. */ in _xbegin()
1761 " .long 1f-1b-6\n" in _xbegin()
1786 // clang-format off
1793 // clang-format on
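The inline-asm _xbegin above follows the hardware convention: EAX holds _XBEGIN_STARTED (all ones) when the transaction starts and an abort status otherwise. With a compiler that provides <immintrin.h> and -mrtm, the intrinsic form looks roughly like this (a sketch of the convention, not this file's code path; try_transaction and guard are illustrative names):

#include <immintrin.h>

static int try_transaction(volatile int &guard) {
  unsigned status = _xbegin();
  if (status == _XBEGIN_STARTED) {
    if (guard != 0)       // the real lock is held: speculation cannot succeed
      _xabort(0x01);      // explicit abort with a user code
    // ... speculative critical section would go here ...
    _xend();              // commit the transaction
    return 1;             // succeeded speculatively
  }
  // status holds the abort reason; _XABORT_RETRY suggests trying again.
  return (status & _XABORT_RETRY) ? -1 : 0;
}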
1815 memset(CCAST(kmp_adaptive_lock_statistics_t *, &(lck->stats)), 0, in __kmp_init_speculative_stats()
1816 sizeof(lck->stats)); in __kmp_init_speculative_stats()
1817 lck->stats.next = lck; in __kmp_init_speculative_stats()
1818 lck->stats.prev = lck; in __kmp_init_speculative_stats()
1820 KMP_ASSERT(lck->stats.next->stats.prev == lck); in __kmp_init_speculative_stats()
1821 KMP_ASSERT(lck->stats.prev->stats.next == lck); in __kmp_init_speculative_stats()
1830 lck->stats.next = liveLocks.stats.next; in __kmp_remember_lock()
1831 lck->stats.prev = &liveLocks; in __kmp_remember_lock()
1834 lck->stats.next->stats.prev = lck; in __kmp_remember_lock()
1836 KMP_ASSERT(lck->stats.next->stats.prev == lck); in __kmp_remember_lock()
1837 KMP_ASSERT(lck->stats.prev->stats.next == lck); in __kmp_remember_lock()
1843 KMP_ASSERT(lck->stats.next->stats.prev == lck); in __kmp_forget_lock()
1844 KMP_ASSERT(lck->stats.prev->stats.next == lck); in __kmp_forget_lock()
1846 kmp_adaptive_lock_info_t *n = lck->stats.next; in __kmp_forget_lock()
1847 kmp_adaptive_lock_info_t *p = lck->stats.prev; in __kmp_forget_lock()
1849 n->stats.prev = p; in __kmp_forget_lock()
1850 p->stats.next = n; in __kmp_forget_lock()
1854 memset(CCAST(kmp_adaptive_lock_statistics_t *, &lck->stats), 0, in __kmp_zero_speculative_stats()
1855 sizeof(lck->stats)); in __kmp_zero_speculative_stats()
1861 kmp_adaptive_lock_statistics_t volatile *s = &lck->stats; in __kmp_add_stats()
1863 t->nonSpeculativeAcquireAttempts += lck->acquire_attempts; in __kmp_add_stats()
1864 t->successfulSpeculations += s->successfulSpeculations; in __kmp_add_stats()
1865 t->hardFailedSpeculations += s->hardFailedSpeculations; in __kmp_add_stats()
1866 t->softFailedSpeculations += s->softFailedSpeculations; in __kmp_add_stats()
1867 t->nonSpeculativeAcquires += s->nonSpeculativeAcquires; in __kmp_add_stats()
1868 t->lemmingYields += s->lemmingYields; in __kmp_add_stats()
1888 for (lck = liveLocks.stats.next; lck != &liveLocks; lck = lck->stats.next) { in __kmp_print_speculative_stats()
1893 t->nonSpeculativeAcquires + t->successfulSpeculations; in __kmp_print_speculative_stats()
1894 kmp_uint32 totalSpeculations = t->successfulSpeculations + in __kmp_print_speculative_stats()
1895 t->hardFailedSpeculations + in __kmp_print_speculative_stats()
1896 t->softFailedSpeculations; in __kmp_print_speculative_stats()
1901 if (strcmp(__kmp_speculative_statsfile, "-") == 0) { in __kmp_print_speculative_stats()
1918 fprintf(statsFile, " Non-speculative acquire attempts : %10d\n", in __kmp_print_speculative_stats()
1919 t->nonSpeculativeAcquireAttempts); in __kmp_print_speculative_stats()
1923 t->successfulSpeculations, in __kmp_print_speculative_stats()
1924 percent(t->successfulSpeculations, totalSections)); in __kmp_print_speculative_stats()
1925 fprintf(statsFile, " Non-speculative acquires : %10d (%5.1f%%)\n", in __kmp_print_speculative_stats()
1926 t->nonSpeculativeAcquires, in __kmp_print_speculative_stats()
1927 percent(t->nonSpeculativeAcquires, totalSections)); in __kmp_print_speculative_stats()
1929 t->lemmingYields); in __kmp_print_speculative_stats()
1934 t->successfulSpeculations, in __kmp_print_speculative_stats()
1935 percent(t->successfulSpeculations, totalSpeculations)); in __kmp_print_speculative_stats()
1937 t->softFailedSpeculations, in __kmp_print_speculative_stats()
1938 percent(t->softFailedSpeculations, totalSpeculations)); in __kmp_print_speculative_stats()
1940 t->hardFailedSpeculations, in __kmp_print_speculative_stats()
1941 percent(t->hardFailedSpeculations, totalSpeculations)); in __kmp_print_speculative_stats()
1944 #define KMP_INC_STAT(lck, stat) (lck->lk.adaptive.stats.stat++)
1953 bool res = lck->lk.head_id == 0; in __kmp_is_unlocked_queuing_lock()
1970 lck->lk.adaptive.badness = 0; in __kmp_update_badness_after_success()
1976 kmp_uint32 newBadness = (lck->lk.adaptive.badness << 1) | 1; in __kmp_step_badness()
1977 if (newBadness > lck->lk.adaptive.max_badness) { in __kmp_step_badness()
1980 lck->lk.adaptive.badness = newBadness; in __kmp_step_badness()
1988 kmp_uint32 badness = lck->lk.adaptive.badness; in __kmp_should_speculate()
1989 kmp_uint32 attempts = lck->lk.adaptive.acquire_attempts; in __kmp_should_speculate()
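The badness field acts as a mask that throttles speculation: each hard failure shifts in another one bit, so (attempts & badness) == 0 holds only on roughly every 2^k-th acquire attempt. A tiny standalone illustration of that arithmetic only (the constants and the "every speculation fails" assumption are made up):

#include <cstdio>

int main() {
  unsigned badness = 0, max_badness = 16;
  for (unsigned attempts = 1; attempts <= 12; ++attempts) {
    bool speculate = (attempts & badness) == 0;
    std::printf("attempt %2u badness %2u -> %s\n", attempts, badness,
                speculate ? "speculate" : "use real lock");
    if (speculate) {                 // pretend every speculation fails hard
      unsigned next = (badness << 1) | 1;
      if (next <= max_badness)
        badness = next;              // mask grows 1, 3, 7, 15, capped
    }
  }
  return 0;
}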
1995 // Does not back off to the non-speculative lock.
1999 int retries = lck->lk.adaptive.max_soft_retries; in __kmp_test_adaptive_lock_only()
2012 /* We have successfully started speculation. Check that no-one acquired in __kmp_test_adaptive_lock_only()
2014 the lock cache line into our read-set, which we need so that we'll in __kmp_test_adaptive_lock_only()
2034 } while (retries--); // Loop while we have retries, and didn't fail hard. in __kmp_test_adaptive_lock_only()
2042 // Attempt to acquire the speculative lock, or back off to the non-speculative
2044 // We can succeed speculatively, non-speculatively, or fail.
2051 // Speculative acquisition failed, so try to acquire it non-speculatively. in __kmp_test_adaptive_lock()
2052 // Count the non-speculative acquire attempt in __kmp_test_adaptive_lock()
2053 lck->lk.adaptive.acquire_attempts++; in __kmp_test_adaptive_lock()
2055 // Use base, non-speculative lock. in __kmp_test_adaptive_lock()
2058 return 1; // Lock is acquired (non-speculatively) in __kmp_test_adaptive_lock()
2067 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) { in __kmp_test_adaptive_lock_with_checks()
2074 lck->lk.qlk.owner_id = gtid + 1; in __kmp_test_adaptive_lock_with_checks()
2113 // Speculative acquisition failed, so acquire it non-speculatively. in __kmp_acquire_adaptive_lock()
2114 // Count the non-speculative acquire attempt in __kmp_acquire_adaptive_lock()
2115 lck->lk.adaptive.acquire_attempts++; in __kmp_acquire_adaptive_lock()
2125 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) { in __kmp_acquire_adaptive_lock_with_checks()
2134 lck->lk.qlk.owner_id = gtid + 1; in __kmp_acquire_adaptive_lock_with_checks()
2157 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) { in __kmp_release_adaptive_lock_with_checks()
2160 if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) == -1) { in __kmp_release_adaptive_lock_with_checks()
2166 lck->lk.qlk.owner_id = 0; in __kmp_release_adaptive_lock_with_checks()
2173 lck->lk.adaptive.badness = 0; in __kmp_init_adaptive_lock()
2174 lck->lk.adaptive.acquire_attempts = 0; // nonSpeculativeAcquireAttempts = 0; in __kmp_init_adaptive_lock()
2175 lck->lk.adaptive.max_soft_retries = in __kmp_init_adaptive_lock()
2177 lck->lk.adaptive.max_badness = __kmp_adaptive_backoff_params.max_badness; in __kmp_init_adaptive_lock()
2179 __kmp_zero_speculative_stats(&lck->lk.adaptive); in __kmp_init_adaptive_lock()
2186 __kmp_accumulate_speculative_stats(&lck->lk.adaptive); in __kmp_destroy_adaptive_lock()
2194 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) { in __kmp_destroy_adaptive_lock_with_checks()
2197 if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) != -1) { in __kmp_destroy_adaptive_lock_with_checks()
2205 /* ------------------------------------------------------------------------ */
2210 return lck->lk.owner_id - 1; in __kmp_get_drdpa_lock_owner()
2214 return lck->lk.depth_locked != -1; in __kmp_is_drdpa_lock_nestable()
2219 kmp_uint64 ticket = KMP_ATOMIC_INC(&lck->lk.next_ticket); in __kmp_acquire_drdpa_lock_timed_template()
2220 kmp_uint64 mask = lck->lk.mask; // atomic load in __kmp_acquire_drdpa_lock_timed_template()
2221 std::atomic<kmp_uint64> *polls = lck->lk.polls; in __kmp_acquire_drdpa_lock_timed_template()
2229 // Now spin-wait, but reload the polls pointer and mask, in case the in __kmp_acquire_drdpa_lock_timed_template()
2235 // and poll to be re-read every spin iteration. in __kmp_acquire_drdpa_lock_timed_template()
2243 // Re-read the mask and the poll pointer from the lock structure. in __kmp_acquire_drdpa_lock_timed_template()
2250 mask = lck->lk.mask; // atomic load in __kmp_acquire_drdpa_lock_timed_template()
2251 polls = lck->lk.polls; // atomic load in __kmp_acquire_drdpa_lock_timed_template()
2258 lck->lk.now_serving = ticket; // non-volatile store in __kmp_acquire_drdpa_lock_timed_template()
2265 if ((lck->lk.old_polls != NULL) && (ticket >= lck->lk.cleanup_ticket)) { in __kmp_acquire_drdpa_lock_timed_template()
2266 __kmp_free(lck->lk.old_polls); in __kmp_acquire_drdpa_lock_timed_template()
2267 lck->lk.old_polls = NULL; in __kmp_acquire_drdpa_lock_timed_template()
2268 lck->lk.cleanup_ticket = 0; in __kmp_acquire_drdpa_lock_timed_template()
2274 if (lck->lk.old_polls == NULL) { in __kmp_acquire_drdpa_lock_timed_template()
2277 kmp_uint32 num_polls = TCR_4(lck->lk.num_polls); in __kmp_acquire_drdpa_lock_timed_template()
2285 num_polls = TCR_4(lck->lk.num_polls); in __kmp_acquire_drdpa_lock_timed_template()
2296 kmp_uint64 num_waiting = TCR_8(lck->lk.next_ticket) - ticket - 1; in __kmp_acquire_drdpa_lock_timed_template()
2332 lck->lk.old_polls = old_polls; in __kmp_acquire_drdpa_lock_timed_template()
2333 lck->lk.polls = polls; // atomic store in __kmp_acquire_drdpa_lock_timed_template()
2337 lck->lk.num_polls = num_polls; in __kmp_acquire_drdpa_lock_timed_template()
2338 lck->lk.mask = mask; // atomic store in __kmp_acquire_drdpa_lock_timed_template()
2345 // volatile load / non-volatile store in __kmp_acquire_drdpa_lock_timed_template()
2346 lck->lk.cleanup_ticket = lck->lk.next_ticket; in __kmp_acquire_drdpa_lock_timed_template()
2360 if (lck->lk.initialized != lck) { in __kmp_acquire_drdpa_lock_with_checks()
2372 lck->lk.owner_id = gtid + 1; in __kmp_acquire_drdpa_lock_with_checks()
2379 kmp_uint64 ticket = lck->lk.next_ticket; // atomic load in __kmp_test_drdpa_lock()
2380 std::atomic<kmp_uint64> *polls = lck->lk.polls; in __kmp_test_drdpa_lock()
2381 kmp_uint64 mask = lck->lk.mask; // atomic load in __kmp_test_drdpa_lock()
2384 if (__kmp_atomic_compare_store_acq(&lck->lk.next_ticket, ticket, in __kmp_test_drdpa_lock()
2389 lck->lk.now_serving = ticket; // non-volatile store in __kmp_test_drdpa_lock()
2394 // we'll let a later thread which calls __kmp_acquire_lock do that - this in __kmp_test_drdpa_lock()
2406 if (lck->lk.initialized != lck) { in __kmp_test_drdpa_lock_with_checks()
2416 lck->lk.owner_id = gtid + 1; in __kmp_test_drdpa_lock_with_checks()
2424 kmp_uint64 ticket = lck->lk.now_serving + 1; // non-atomic load in __kmp_release_drdpa_lock()
2425 std::atomic<kmp_uint64> *polls = lck->lk.polls; // atomic load in __kmp_release_drdpa_lock()
2426 kmp_uint64 mask = lck->lk.mask; // atomic load in __kmp_release_drdpa_lock()
2428 ticket - 1, lck)); in __kmp_release_drdpa_lock()
2438 if (lck->lk.initialized != lck) { in __kmp_release_drdpa_lock_with_checks()
2444 if (__kmp_get_drdpa_lock_owner(lck) == -1) { in __kmp_release_drdpa_lock_with_checks()
2451 lck->lk.owner_id = 0; in __kmp_release_drdpa_lock_with_checks()
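The DRDPA lock is a ticket lock whose waiters each spin on their own slot of a polling array, polls[ticket & mask], and whose release publishes the next ticket into the slot that ticket maps to; the real implementation also grows the array under contention and frees the old one once cleanup_ticket is reached. A fixed-size sketch of the core idea (the template parameter and names are mine):

#include <atomic>
#include <cstdint>

template <unsigned N>                       // N must be a power of two
struct drdpa_like_lock {
  std::atomic<uint64_t> next_ticket{0};
  std::atomic<uint64_t> polls[N];           // polling area, one slot per waiter
  uint64_t now_serving = 0;                 // only touched by the owner
  static constexpr uint64_t mask = N - 1;

  drdpa_like_lock() {
    for (auto &p : polls)
      p.store(0, std::memory_order_relaxed);
  }

  void lock() {
    uint64_t ticket = next_ticket.fetch_add(1, std::memory_order_relaxed);
    // Spin on our own slot until the releasing thread writes our ticket.
    while (polls[ticket & mask].load(std::memory_order_acquire) != ticket) {
    }
    now_serving = ticket;
  }

  void unlock() {
    uint64_t ticket = now_serving + 1;
    polls[ticket & mask].store(ticket, std::memory_order_release);
  }
};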
2456 lck->lk.location = NULL; in __kmp_init_drdpa_lock()
2457 lck->lk.mask = 0; in __kmp_init_drdpa_lock()
2458 lck->lk.num_polls = 1; in __kmp_init_drdpa_lock()
2459 lck->lk.polls = (std::atomic<kmp_uint64> *)__kmp_allocate( in __kmp_init_drdpa_lock()
2460 lck->lk.num_polls * sizeof(*(lck->lk.polls))); in __kmp_init_drdpa_lock()
2461 lck->lk.cleanup_ticket = 0; in __kmp_init_drdpa_lock()
2462 lck->lk.old_polls = NULL; in __kmp_init_drdpa_lock()
2463 lck->lk.next_ticket = 0; in __kmp_init_drdpa_lock()
2464 lck->lk.now_serving = 0; in __kmp_init_drdpa_lock()
2465 lck->lk.owner_id = 0; // no thread owns the lock. in __kmp_init_drdpa_lock()
2466 lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks. in __kmp_init_drdpa_lock()
2467 lck->lk.initialized = lck; in __kmp_init_drdpa_lock()
2473 lck->lk.initialized = NULL; in __kmp_destroy_drdpa_lock()
2474 lck->lk.location = NULL; in __kmp_destroy_drdpa_lock()
2475 if (lck->lk.polls.load() != NULL) { in __kmp_destroy_drdpa_lock()
2476 __kmp_free(lck->lk.polls.load()); in __kmp_destroy_drdpa_lock()
2477 lck->lk.polls = NULL; in __kmp_destroy_drdpa_lock()
2479 if (lck->lk.old_polls != NULL) { in __kmp_destroy_drdpa_lock()
2480 __kmp_free(lck->lk.old_polls); in __kmp_destroy_drdpa_lock()
2481 lck->lk.old_polls = NULL; in __kmp_destroy_drdpa_lock()
2483 lck->lk.mask = 0; in __kmp_destroy_drdpa_lock()
2484 lck->lk.num_polls = 0; in __kmp_destroy_drdpa_lock()
2485 lck->lk.cleanup_ticket = 0; in __kmp_destroy_drdpa_lock()
2486 lck->lk.next_ticket = 0; in __kmp_destroy_drdpa_lock()
2487 lck->lk.now_serving = 0; in __kmp_destroy_drdpa_lock()
2488 lck->lk.owner_id = 0; in __kmp_destroy_drdpa_lock()
2489 lck->lk.depth_locked = -1; in __kmp_destroy_drdpa_lock()
2494 if (lck->lk.initialized != lck) { in __kmp_destroy_drdpa_lock_with_checks()
2500 if (__kmp_get_drdpa_lock_owner(lck) != -1) { in __kmp_destroy_drdpa_lock_with_checks()
2512 lck->lk.depth_locked += 1; in __kmp_acquire_nested_drdpa_lock()
2517 lck->lk.depth_locked = 1; in __kmp_acquire_nested_drdpa_lock()
2519 lck->lk.owner_id = gtid + 1; in __kmp_acquire_nested_drdpa_lock()
2527 if (lck->lk.initialized != lck) { in __kmp_acquire_nested_drdpa_lock_with_checks()
2542 retval = ++lck->lk.depth_locked; in __kmp_test_nested_drdpa_lock()
2547 retval = lck->lk.depth_locked = 1; in __kmp_test_nested_drdpa_lock()
2549 lck->lk.owner_id = gtid + 1; in __kmp_test_nested_drdpa_lock()
2557 if (lck->lk.initialized != lck) { in __kmp_test_nested_drdpa_lock_with_checks()
2570 if (--(lck->lk.depth_locked) == 0) { in __kmp_release_nested_drdpa_lock()
2572 lck->lk.owner_id = 0; in __kmp_release_nested_drdpa_lock()
2583 if (lck->lk.initialized != lck) { in __kmp_release_nested_drdpa_lock_with_checks()
2589 if (__kmp_get_drdpa_lock_owner(lck) == -1) { in __kmp_release_nested_drdpa_lock_with_checks()
2600 lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks in __kmp_init_nested_drdpa_lock()
2605 lck->lk.depth_locked = 0; in __kmp_destroy_nested_drdpa_lock()
2610 if (lck->lk.initialized != lck) { in __kmp_destroy_nested_drdpa_lock_with_checks()
2616 if (__kmp_get_drdpa_lock_owner(lck) != -1) { in __kmp_destroy_nested_drdpa_lock_with_checks()
2625 return lck->lk.location; in __kmp_get_drdpa_lock_location()
2630 lck->lk.location = loc; in __kmp_set_drdpa_lock_location()
2634 return lck->lk.flags; in __kmp_get_drdpa_lock_flags()
2639 lck->lk.flags = flags; in __kmp_set_drdpa_lock_flags()
2642 // Time stamp counter
2656 // shorter to go clockwise from a to b around the clock-face, or anti-clockwise.
2657 // Times where going clockwise is less distance than going anti-clockwise
2658 // are in the future, others are in the past. e.g. a = MAX-1, b = MAX+1 (=0),
2659 // then a > b (true) does not mean a reached b; whereas signed(a) = -2,
2662 return ((kmp_int64)b - (kmp_int64)a) > 0; in before()
2669 for (i = boff->step; i > 0; i--) { in __kmp_spin_backoff()
2670 kmp_uint64 goal = __kmp_tsc() + boff->min_tick; in __kmp_spin_backoff()
2673 __kmp_tpause(0, boff->min_tick); in __kmp_spin_backoff()
2683 boff->step = (boff->step << 1 | 1) & (boff->max_backoff - 1); in __kmp_spin_backoff()
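Putting the pieces together, __kmp_spin_backoff burns step quanta of min_tick timestamp-counter ticks using the wrap-safe before() comparison, then grows step as a truncated exponential (1, 3, 7, 15, ... clipped below max_backoff). A simplified sketch with the same shape, assuming an x86 target and a GCC/Clang-style <x86intrin.h> for __rdtsc():

#include <cstdint>
#include <x86intrin.h>   // __rdtsc()

struct backoff {
  uint32_t step = 1;
  uint32_t max_backoff = 16;   // power of two, so max_backoff - 1 is a mask
  uint64_t min_tick = 100;     // busy-wait quantum in TSC ticks
};

// Wrap-safe "a is before b", as in before() above: the signed difference is
// positive exactly when b is ahead of a on the counter's circle.
static bool before(uint64_t a, uint64_t b) {
  return ((int64_t)b - (int64_t)a) > 0;
}

static void spin_backoff(backoff &b) {
  for (uint32_t i = b.step; i > 0; --i) {
    uint64_t goal = __rdtsc() + b.min_tick;
    while (before(__rdtsc(), goal)) {
      // busy wait; a real implementation would issue a pause instruction
    }
  }
  // Grow the delay for next time, truncated below max_backoff.
  b.step = (b.step << 1 | 1) & (b.max_backoff - 1);
}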
2692 TCW_4(((kmp_base_tas_lock_t *)lck)->poll, KMP_GET_D_TAG(seq)); in __kmp_init_direct_lock()
2700 // HLE lock functions - imported from the testbed runtime.
2721 for (int i = delay; i != 0; --i) in __kmp_acquire_hle_lock()
2787 } while (retries--); in __kmp_acquire_rtm_queuing_lock()
2789 // Fall-back non-speculative lock (xchg) in __kmp_acquire_rtm_queuing_lock()
2827 } while (retries--); in __kmp_test_rtm_queuing_lock()
2837 // Reuse kmp_tas_lock_t for TSX lock which use RTM with fall-back spin lock.
2841 KMP_ATOMIC_ST_REL(&lck->lk.poll, 0); in __kmp_destroy_rtm_spin_lock()
2857 if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == lock_free) in __kmp_acquire_rtm_spin_lock()
2863 while (KMP_ATOMIC_LD_RLX(&lck->lk.poll) != lock_free) { in __kmp_acquire_rtm_spin_lock()
2868 } while (retries--); in __kmp_acquire_rtm_spin_lock()
2870 // Fall-back spin lock in __kmp_acquire_rtm_spin_lock()
2873 while (KMP_ATOMIC_LD_RLX(&lck->lk.poll) != lock_free || in __kmp_acquire_rtm_spin_lock()
2874 !__kmp_atomic_compare_store_acq(&lck->lk.poll, lock_free, lock_busy)) { in __kmp_acquire_rtm_spin_lock()
2889 if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == KMP_LOCK_FREE(rtm_spin)) { in __kmp_release_rtm_spin_lock()
2895 KMP_ATOMIC_ST_REL(&lck->lk.poll, KMP_LOCK_FREE(rtm_spin)); in __kmp_release_rtm_spin_lock()
2913 KMP_ATOMIC_LD_RLX(&lck->lk.poll) == lock_free) { in __kmp_test_rtm_spin_lock()
2918 } while (retries--); in __kmp_test_rtm_spin_lock()
2920 if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == lock_free && in __kmp_test_rtm_spin_lock()
2921 __kmp_atomic_compare_store_acq(&lck->lk.poll, lock_free, lock_busy)) { in __kmp_test_rtm_spin_lock()
2954 __kmp_##op##_##lk##_##lock(&lock->lk); \
2959 return __kmp_##op##_##lk##_##lock(&lock->lk, gtid); \
2964 __kmp_set_##lk##_lock_flags(&lock->lk, flags); \
2969 __kmp_set_##lk##_lock_location(&lock->lk, loc); \
3132 idx = lck->lock->pool.index; in __kmp_allocate_indirect_lock()
3133 __kmp_indirect_lock_pool[tag] = (kmp_indirect_lock_t *)lck->lock->pool.next; in __kmp_allocate_indirect_lock()
3142 table_idx = lock_table->next; // index within this table in __kmp_allocate_indirect_lock()
3143 idx += lock_table->next; // global index within list of tables in __kmp_allocate_indirect_lock()
3144 if (table_idx < lock_table->nrow_ptrs * KMP_I_LOCK_CHUNK) { in __kmp_allocate_indirect_lock()
3148 if (!lock_table->table[row]) { in __kmp_allocate_indirect_lock()
3149 lock_table->table[row] = (kmp_indirect_lock_t *)__kmp_allocate( in __kmp_allocate_indirect_lock()
3155 if (!lock_table->next_table) { in __kmp_allocate_indirect_lock()
3159 next_table->table = (kmp_indirect_lock_t **)__kmp_allocate( in __kmp_allocate_indirect_lock()
3160 sizeof(kmp_indirect_lock_t *) * 2 * lock_table->nrow_ptrs); in __kmp_allocate_indirect_lock()
3161 next_table->nrow_ptrs = 2 * lock_table->nrow_ptrs; in __kmp_allocate_indirect_lock()
3162 next_table->next = 0; in __kmp_allocate_indirect_lock()
3163 next_table->next_table = nullptr; in __kmp_allocate_indirect_lock()
3164 lock_table->next_table = next_table; in __kmp_allocate_indirect_lock()
3166 lock_table = lock_table->next_table; in __kmp_allocate_indirect_lock()
3169 lock_table->next++; in __kmp_allocate_indirect_lock()
3171 lck = &lock_table->table[row][col]; in __kmp_allocate_indirect_lock()
3173 lck->lock = (kmp_user_lock_p)__kmp_allocate(__kmp_indirect_lock_size[tag]); in __kmp_allocate_indirect_lock()
3180 lck->type = tag; in __kmp_allocate_indirect_lock()
3183 *(kmp_lock_index_t *)&(((kmp_base_tas_lock_t *)user_lock)->poll) = in __kmp_allocate_indirect_lock()
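The indirect-lock table used above is a chunked, growable structure: an index splits into a row and a column within KMP_I_LOCK_CHUNK-sized rows, rows are allocated lazily, and when a table fills up a successor with twice as many row pointers is chained through next_table. A generic sketch of that layout (the sizes, element type, and the absence of locking around growth are simplifications, not the library's definitions):

#include <cstdlib>

struct slot { void *payload; };

struct lock_table {
  static constexpr unsigned CHUNK = 1024;  // slots per row
  slot **rows;                             // lazily allocated row pointers
  unsigned nrow_ptrs;                      // capacity of this table, in rows
  unsigned next;                           // next free index within this table
  lock_table *next_table;                  // chained when this table fills up
};

static slot *table_allocate(lock_table *t, unsigned &global_idx) {
  unsigned base = 0;                       // indices consumed by earlier tables
  for (;;) {
    unsigned idx = t->next;
    if (idx < t->nrow_ptrs * lock_table::CHUNK) {
      unsigned row = idx / lock_table::CHUNK, col = idx % lock_table::CHUNK;
      if (!t->rows[row])                   // allocate the row on first use
        t->rows[row] = (slot *)std::calloc(lock_table::CHUNK, sizeof(slot));
      t->next++;
      global_idx = base + idx;
      return &t->rows[row][col];
    }
    base += t->nrow_ptrs * lock_table::CHUNK;
    if (!t->next_table) {                  // grow: chain a table twice as wide
      lock_table *n = (lock_table *)std::malloc(sizeof(lock_table));
      n->nrow_ptrs = 2 * t->nrow_ptrs;
      n->rows = (slot **)std::calloc(n->nrow_ptrs, sizeof(slot *));
      n->next = 0;
      n->next_table = nullptr;
      t->next_table = n;
    }
    t = t->next_table;
  }
}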
3235 KMP_I_LOCK_FUNC(l, init)(l->lock); in __kmp_init_indirect_lock()
3245 KMP_I_LOCK_FUNC(l, destroy)(l->lock); in __kmp_destroy_indirect_lock()
3246 kmp_indirect_locktag_t tag = l->type; in __kmp_destroy_indirect_lock()
3251 l->lock->pool.next = (kmp_user_lock_p)__kmp_indirect_lock_pool[tag]; in __kmp_destroy_indirect_lock()
3253 l->lock->pool.index = KMP_EXTRACT_I_INDEX(lock); in __kmp_destroy_indirect_lock()
3262 return KMP_I_LOCK_FUNC(l, set)(l->lock, gtid); in __kmp_set_indirect_lock()
3267 return KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid); in __kmp_unset_indirect_lock()
3272 return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid); in __kmp_test_indirect_lock()
3279 return KMP_I_LOCK_FUNC(l, set)(l->lock, gtid); in __kmp_set_indirect_lock_with_checks()
3286 return KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid); in __kmp_unset_indirect_lock_with_checks()
3293 return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid); in __kmp_test_indirect_lock_with_checks()
3434 l = (kmp_indirect_lock_t *)l->lock->pool.next; in __kmp_cleanup_indirect_user_locks()
3437 __kmp_free(ll->lock); in __kmp_cleanup_indirect_user_locks()
3438 ll->lock = NULL; in __kmp_cleanup_indirect_user_locks()
3445 for (kmp_uint32 row = 0; row < ptr->nrow_ptrs; ++row) { in __kmp_cleanup_indirect_user_locks()
3446 if (!ptr->table[row]) in __kmp_cleanup_indirect_user_locks()
3449 kmp_indirect_lock_t *l = &ptr->table[row][col]; in __kmp_cleanup_indirect_user_locks()
3450 if (l->lock) { in __kmp_cleanup_indirect_user_locks()
3452 KMP_I_LOCK_FUNC(l, destroy)(l->lock); in __kmp_cleanup_indirect_user_locks()
3456 __kmp_free(l->lock); in __kmp_cleanup_indirect_user_locks()
3459 __kmp_free(ptr->table[row]); in __kmp_cleanup_indirect_user_locks()
3461 kmp_indirect_lock_table_t *next_table = ptr->next_table; in __kmp_cleanup_indirect_user_locks()
3471 int __kmp_num_locks_in_block = 1; // FIXME - tune this value
3494 return lck == lck->lk.self; in __kmp_is_ticket_lock_initialized()
3506 return lck == lck->lk.initialized; in __kmp_is_queuing_lock_initialized()
3525 return lck == lck->lk.initialized; in __kmp_is_drdpa_lock_initialized()
3785 // ----------------------------------------------------------------------------
3791 // Lock block-allocation support.
3793 int __kmp_num_locks_in_block = 1; // FIXME - tune this value
3809 sizeof(kmp_user_lock_p) * (__kmp_user_lock_table.used - 1)); in __kmp_lock_table_insert()
3840 new_block->next_block = __kmp_lock_blocks; in __kmp_lock_block_allocate()
3841 new_block->locks = (void *)buffer; in __kmp_lock_block_allocate()
3847 ((char *)(__kmp_lock_blocks->locks))[last_index * __kmp_user_lock_size])); in __kmp_lock_block_allocate()
3877 index = __kmp_lock_pool->pool.index; in __kmp_user_lock_allocate()
3878 __kmp_lock_pool = __kmp_lock_pool->pool.next; in __kmp_user_lock_allocate()
3905 lck->pool.next = __kmp_lock_pool; in __kmp_user_lock_free()
3910 lck->pool.index = index; in __kmp_user_lock_free()
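The user-lock pool is an intrusive free list: __kmp_user_lock_free pushes a lock with its table index remembered in pool.index, and __kmp_user_lock_allocate pops from the pool before carving a new block. A minimal sketch of that pattern (types and names are mine):

#include <cstddef>

struct pooled_lock {
  union {
    struct {
      pooled_lock *next;   // free-list link, valid only while pooled
      std::size_t index;   // table index remembered across free/reuse
    } pool;
    // ... live lock state overlays this union while the lock is in use ...
  };
};

struct lock_pool {
  pooled_lock *head = nullptr;

  void put(pooled_lock *lck, std::size_t index) {
    lck->pool.next = head;       // push onto the free list
    lck->pool.index = index;
    head = lck;
  }

  pooled_lock *get(std::size_t *index_out) {
    if (!head)
      return nullptr;            // empty: caller falls back to block allocation
    pooled_lock *lck = head;
    *index_out = lck->pool.index;
    head = lck->pool.next;
    return lck;
  }
};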
3949 // Reset lock pool. Don't worry about locks in the pool--we will free them when in __kmp_cleanup_user_locks()
3960 // FIXME - we are iterating through a list of (pointers to) objects of type in __kmp_cleanup_user_locks()
3986 __kmp_user_lock_table.table[--__kmp_user_lock_table.used]; in __kmp_cleanup_user_locks()
3995 (loc->psource != NULL)) { in __kmp_cleanup_user_locks()
3996 kmp_str_loc_t str_loc = __kmp_str_loc_init(loc->psource, false); in __kmp_cleanup_user_locks()
4043 kmp_block_of_locks_t *next = block_ptr->next_block; in __kmp_cleanup_user_locks()
4044 __kmp_free(block_ptr->locks); in __kmp_cleanup_user_locks()