2 * kmp_lock.h -- lock header file
5 //===----------------------------------------------------------------------===//
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
11 //===----------------------------------------------------------------------===//
28 // ----------------------------------------------------------------------------
33 (sizeof(type) + (sz - ((sizeof(type) - 1) % (sz)) - 1))
34 #define KMP_GTID_DNE (-2)
42 // ----------------------------------------------------------------------------
46 // compiler always allocates a pointer-sized area, as does Visual Studio.
48 // gcc, however, only allocates 4 bytes for regular locks, even on 64-bit
50 // recent versions), but we are bounded by the pointer-sized chunks that
61 // The Intel compiler allocates a 32-byte chunk for a critical section.
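
A minimal sketch of how the padding expression quoted earlier in this listing rounds a lock type up to a whole chunk; the helper name kmp_pad_demo and the 64-byte chunk size are assumptions for illustration, not part of kmp_lock.h.

#include <cstddef>

// Hypothetical restatement of the padding arithmetic shown above: round
// sizeof(T) up to the next multiple of the chunk size sz.
template <typename T>
constexpr std::size_t kmp_pad_demo(std::size_t sz) {
  return sizeof(T) + (sz - ((sizeof(T) - 1) % sz) - 1);
}

// With an assumed 64-byte chunk, small locks pad to one chunk and anything
// past a chunk boundary rounds up to the next one.
static_assert(kmp_pad_demo<char[4]>(64) == 64, "4 bytes -> one 64-byte chunk");
static_assert(kmp_pad_demo<char[72]>(64) == 128, "72 bytes -> two chunks");
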
86 // ----------------------------------------------------------------------------
96 // 1. Bootstrap locks -- Used for a few locks available at library
97 // startup-shutdown time.
98 // These do not require non-negative global thread IDs.
99 // 2. Internal RTL locks -- Used everywhere else in the RTL
101 // ----------------------------------------------------------------------------
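
Category 1 above matters because bootstrap locks can be taken before any global thread ID has been assigned, which is what the KMP_GTID_DNE sentinel defined earlier expresses. A hedged, self-contained sketch of that idea (all demo_* names are invented; the runtime's real bootstrap locks are built on its own lock types):

#include <atomic>

#define DEMO_GTID_DNE (-2) // stand-in for KMP_GTID_DNE shown above

struct demo_bootstrap_lock {
  std::atomic<int> owner{0}; // 0 = free, otherwise (gtid + 1) of the holder
};

static inline void demo_acquire_lock(demo_bootstrap_lock *lck, int gtid) {
  // gtid may be DEMO_GTID_DNE during startup/shutdown; (gtid + 1) is then -1,
  // still a non-zero "held" marker, so no valid thread ID is required.
  int expected = 0;
  while (!lck->owner.compare_exchange_weak(expected, gtid + 1,
                                           std::memory_order_acquire))
    expected = 0;
}

static inline void demo_acquire_bootstrap_lock(demo_bootstrap_lock *lck) {
  demo_acquire_lock(lck, DEMO_GTID_DNE); // no gtid available yet
}

static inline void demo_release_bootstrap_lock(demo_bootstrap_lock *lck) {
  lck->owner.store(0, std::memory_order_release);
}
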
108 // Non-nested test and set locks differ from the other lock kinds (except
114 // the depth_locked field for non-nested locks.
119 // ----------------------------------------------------------------------------
125 // Flip the ordering of the high and low 32-bit members to be consistent
126 // with the memory layout of the address in 64-bit big-endian.
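
A hedged sketch of the layout decision described above, reusing the poll and depth_locked member names that appear elsewhere in this listing: on 64-bit big-endian targets the two 32-bit members swap places so that the pair keeps a consistent meaning when viewed through a single 64-bit access. This illustrates the idea; it is not the exact declaration from the file.

#include <atomic>
#include <cstdint>

struct demo_tas_lock {
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__) &&    \
    defined(__LP64__)
  std::int32_t depth_locked;      // nesting depth; -1 for non-nested locks
  std::atomic<std::int32_t> poll; // 0 = free, (gtid + 1) = owning thread
#else
  std::atomic<std::int32_t> poll; // 0 = free, (gtid + 1) = owning thread
  std::int32_t depth_locked;      // nesting depth; -1 for non-nested locks
#endif
};
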
175 // ----------------------------------------------------------------------------
178 // Like non-nested test and set locks, non-nested futex locks use the memory
183 // set locks. With non-nested futex locks, the lock owner is not even available.
184 // ----------------------------------------------------------------------------
187 volatile kmp_int32 poll; // KMP_LOCK_FREE(futex) => unlocked
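
Because the futex lock keeps its whole state in that single poll word, the owner's identity cannot in general be recovered from it, as noted above. For reference, a minimal Drepper-style futex lock built on one such word; this is a Linux-only illustration with invented names and a simplified 0/1/2 encoding, not the runtime's actual protocol.

#include <atomic>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

// poll states: 0 = free, 1 = locked, 2 = locked with possible waiters.
static void demo_futex_acquire(std::atomic<int> &poll) {
  int c = 0;
  if (poll.compare_exchange_strong(c, 1))
    return; // uncontended fast path, no syscall
  if (c != 2)
    c = poll.exchange(2); // announce that a waiter exists
  while (c != 0) {
    syscall(SYS_futex, &poll, FUTEX_WAIT, 2, nullptr, nullptr, 0);
    c = poll.exchange(2);
  }
}

static void demo_futex_release(std::atomic<int> &poll) {
  if (poll.exchange(0) == 2) // wake one waiter only if the lock was contended
    syscall(SYS_futex, &poll, FUTEX_WAKE, 1, nullptr, nullptr, 0);
}
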
227 // ----------------------------------------------------------------------------
233 // MSVC won't allow use of std::atomic<> in a union since it has non-trivial
239 volatile union kmp_ticket_lock *self; // points to the lock union
252 volatile union kmp_ticket_lock *self; // points to the lock union
287 { true, &(lock), NULL, 0U, 0U, 0, -1 } \
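
The initializer above belongs to the ticket lock, whose core idea is worth restating in its simplest form: threads draw monotonically increasing tickets and are served strictly in ticket order. The sketch below is generic and omits the self pointer and the other bookkeeping visible in the initializer above.

#include <atomic>

struct demo_ticket_lock {
  std::atomic<unsigned> next_ticket{0}; // next ticket to hand out
  std::atomic<unsigned> now_serving{0}; // ticket currently allowed to run

  void acquire() {
    unsigned my_ticket = next_ticket.fetch_add(1, std::memory_order_relaxed);
    while (now_serving.load(std::memory_order_acquire) != my_ticket) {
      // spin; a real implementation would back off or yield here
    }
  }
  void release() { now_serving.fetch_add(1, std::memory_order_release); }
};
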
307 // ----------------------------------------------------------------------------
344 kmp_uint32 volatile badness;
345 kmp_uint32 volatile acquire_attempts;
351 kmp_adaptive_lock_statistics_t volatile stats;
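
A hedged guess at how the badness / acquire_attempts pair can steer an adaptive (speculative) lock: speculation is skipped while a saturating failure score says the lock has recently speculated poorly. The predicate below only sketches that idea; the runtime's actual policy may differ.

#include <cstdint>

// badness acts as a low-bit mask (1, 3, 7, ...) widened on failed
// speculations; speculation is retried only when the attempt counter
// clears the current badness window.
static inline bool demo_should_speculate(std::uint32_t acquire_attempts,
                                         std::uint32_t badness) {
  return (acquire_attempts & badness) == 0;
}

static inline std::uint32_t demo_bump_badness(std::uint32_t badness) {
  return (badness << 1) | 1; // widen the window after a failed speculation
}
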
360 volatile union kmp_queuing_lock
365 KMP_ALIGN(8) // tail_id must be 8-byte aligned!
367 volatile kmp_int32
369 // Must be no padding here since head/tail used in 8-byte CAS
370 volatile kmp_int32
373 // bakery-style lock
374 volatile kmp_uint32
376 volatile kmp_uint32
378 volatile kmp_int32 owner_id; // (gtid+1) of owning thread, 0 if unlocked
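
The "no padding" requirement noted above exists because the queuing lock updates head_id and tail_id together with one 8-byte compare-and-swap. A self-contained illustration of that pair-CAS, with invented demo_* names:

#include <atomic>
#include <cstdint>
#include <cstring>

struct demo_queue_ids {
  std::int32_t head_id; // (gtid+1) of first waiter, 0 if none
  std::int32_t tail_id; // (gtid+1) of last waiter, 0 if none
};
static_assert(sizeof(demo_queue_ids) == sizeof(std::uint64_t),
              "head/tail must pack into a single 8-byte CAS");

static bool demo_swap_both(std::atomic<std::uint64_t> &ids,
                           demo_queue_ids expected, demo_queue_ids desired) {
  std::uint64_t e, d;
  std::memcpy(&e, &expected, sizeof e);
  std::memcpy(&d, &desired, sizeof d);
  return ids.compare_exchange_strong(e, d); // both ids change atomically or not at all
}
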
415 // ----------------------------------------------------------------------------
434 #define GET_QLK_PTR(l) ((kmp_queuing_lock_t *)&(l)->lk.qlk)
438 // ----------------------------------------------------------------------------
449 volatile union kmp_drdpa_lock
453 std::atomic<kmp_uint64> mask; // is 2**num_polls-1 for mod op
470 // it is non-volatile, but it needs to exist on a separate cache line,
475 // line. owner_id is read by other threads, so it must be declared volatile.
477 kmp_uint64 now_serving; // doesn't have to be volatile
478 volatile kmp_uint32 owner_id; // (gtid+1) of owning thread, 0 if unlocked
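
The mask comment above works because num_polls is kept a power of two, so a ticket can be mapped to a poll slot with a bitwise AND instead of a modulo. A small illustration with invented names:

#include <cstdint>

// mask == num_polls - 1 for a power-of-two num_polls, so the AND below is
// exactly ticket % num_polls without a divide.
static inline std::uint64_t demo_poll_slot(std::uint64_t ticket,
                                           std::uint64_t mask) {
  return ticket & mask;
}
// e.g. num_polls == 8 -> mask == 7, so ticket 13 polls slot 13 & 7 == 5.
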
515 // Bootstrap locks -- very few locks used at library initialization time.
551 // FIXME - We should go through and figure out which lock kind works best for
648 lck->tas.lk.depth_locked != -1) { \
651 if ((gtid >= 0) && (lck->tas.lk.poll - 1 == gtid)) { \
655 if (lck->tas.lk.poll != 0 || \
656 !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)) { \
665 lck->tas.lk.poll != 0 || \
666 !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)); \
696 lck->tas.lk.depth_locked != -1) { in __kmp_test_user_lock_with_checks()
700 return ((lck->tas.lk.poll == 0) && in __kmp_test_user_lock_with_checks()
701 __kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)); in __kmp_test_user_lock_with_checks()
731 // We need a non-checking version of destroy lock for when the RTL is
757 lck->tas.lk.depth_locked == -1) { \
761 if (lck->tas.lk.poll - 1 == gtid) { \
762 lck->tas.lk.depth_locked += 1; \
765 if ((lck->tas.lk.poll != 0) || \
766 !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)) { \
775 (lck->tas.lk.poll != 0) || \
776 !__kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)); \
778 lck->tas.lk.depth_locked = 1; \
807 lck->tas.lk.depth_locked == -1) { in __kmp_test_nested_user_lock_with_checks()
812 if (lck->tas.lk.poll - 1 == in __kmp_test_nested_user_lock_with_checks()
814 return ++lck->tas.lk.depth_locked; /* same owner, depth increased */ in __kmp_test_nested_user_lock_with_checks()
816 retval = ((lck->tas.lk.poll == 0) && in __kmp_test_nested_user_lock_with_checks()
817 __kmp_atomic_compare_store_acq(&lck->tas.lk.poll, 0, gtid + 1)); in __kmp_test_nested_user_lock_with_checks()
820 lck->tas.lk.depth_locked = 1; in __kmp_test_nested_user_lock_with_checks()
865 // function pointer and call it if non-NULL.
938 /* On 64-bit Linux* OS (and OS X*) GNU compiler allocates only 4 bytes of memory
1016 // object, and we differentiate lock types by this size requirement: direct and
1029 // An indirect lock object requires more space than the compiler-generated
1031 // compiler-generated space for the lock (i.e., size of omp_lock_t), this
1032 // omp_lock_t object stores either the address of the heap-allocated indirect
1115 #define KMP_GET_I_TAG(seq) (kmp_indirect_locktag_t)((seq)-KMP_FIRST_I_LOCK)
1149 ((kmp_dyna_lock_t)((kmp_base_tas_lock_t *)(l))->poll & \
1150 ((1 << KMP_LOCK_SHIFT) - 1) & \
1151 -((kmp_dyna_lock_t)((kmp_tas_lock_t *)(l))->lk.poll & 1))
1155 ((kmp_lock_index_t)((kmp_base_tas_lock_t *)(l))->poll >> 1)
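
A worked illustration of the two extraction macros above, assuming (for the example only) that KMP_LOCK_SHIFT is 8. A direct lock keeps bit 0 of the poll word set, so -(poll & 1) is all ones and the tag in the low bits survives the mask; an indirect lock keeps bit 0 clear, so the direct tag collapses to 0 and poll >> 1 is the index into the indirect-lock table.

#include <cstdint>

constexpr std::uint32_t demo_d_tag(std::uint32_t poll) {
  // low 8 bits of poll, zeroed entirely when bit 0 is clear
  return poll & ((1u << 8) - 1) & (0u - (poll & 1u));
}
constexpr std::uint32_t demo_i_index(std::uint32_t poll) { return poll >> 1; }

static_assert(demo_d_tag(0x2B) == 0x2B, "odd poll word -> direct lock tag");
static_assert(demo_d_tag(42u << 1) == 0, "even poll word -> no direct tag");
static_assert(demo_i_index(42u << 1) == 42, "even poll word -> table index 42");
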
1164 __kmp_indirect_##op[((kmp_indirect_lock_t *)(l))->type]
1202 if (__kmp_indirect_set_location[(lck)->type] != NULL) \
1203 __kmp_indirect_set_location[(lck)->type]((lck)->lock, loc); \
1211 if (__kmp_indirect_set_flags[(lck)->type] != NULL) \
1212 __kmp_indirect_set_flags[(lck)->type]((lck)->lock, flag); \
1219 (__kmp_indirect_get_location[(lck)->type] != NULL \
1220 ? __kmp_indirect_get_location[(lck)->type]((lck)->lock) \
1227 (__kmp_indirect_get_flags[(lck)->type] != NULL \
1228 ? __kmp_indirect_get_flags[(lck)->type]((lck)->lock) \
1253 kmp_lock_index_t max_locks = lock_table->nrow_ptrs * KMP_I_LOCK_CHUNK; in __kmp_get_i_lock()
1257 if (!lock_table->table[row] || idx >= lock_table->next) in __kmp_get_i_lock()
1259 return &lock_table->table[row][col]; in __kmp_get_i_lock()
1261 idx -= max_locks; in __kmp_get_i_lock()
1262 lock_table = lock_table->next_table; in __kmp_get_i_lock()
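
A self-contained reconstruction of the lookup these fragments come from, with simplified types, invented demo_* names, and an assumed chunk size: walk the chain of lock tables, and once the flat index falls inside a table, translate it into a (row, column) pair of chunk-sized rows.

#include <cstddef>

constexpr std::size_t DEMO_CHUNK = 1024; // stand-in for KMP_I_LOCK_CHUNK

struct demo_lock_table {
  demo_lock_table *next_table; // next chained table, or nullptr
  std::size_t nrow_ptrs;       // number of row pointers in this table
  std::size_t next;            // locks handed out from this table so far
  int **table;                 // nrow_ptrs rows of DEMO_CHUNK locks each
};

static int *demo_get_lock(demo_lock_table *t, std::size_t idx) {
  while (t) {
    std::size_t max_locks = t->nrow_ptrs * DEMO_CHUNK;
    if (idx < max_locks) {
      std::size_t row = idx / DEMO_CHUNK, col = idx % DEMO_CHUNK;
      if (!t->table[row] || idx >= t->next)
        return nullptr;           // slot not (yet) backed by an allocated lock
      return &t->table[row][col]; // found in this table
    }
    idx -= max_locks; // index belongs to a later table in the chain
    t = t->next_table;
  }
  return nullptr;
}
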
1293 kmp_uint32 min_tick; // size of inner delay loop in ticks (machine-dependent)
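
min_tick belongs to the spin-backoff state; a generic truncated exponential backoff built around such a field might look like the sketch below. Everything beyond min_tick's stated meaning is an assumption here (field names, default values, and the pause mechanism), and the runtime's actual policy may differ.

#include <cstdint>

struct demo_backoff {
  std::uint32_t step = 1;          // current outer delay, widened on contention
  std::uint32_t max_backoff = 256; // cap on the outer delay (assumed value)
  std::uint32_t min_tick = 64;     // machine-dependent inner-loop size (assumed)
};

static inline void demo_backoff_spin(demo_backoff &b) {
  // Burn roughly step * min_tick iterations; volatile keeps the empty loop
  // from being optimized away. Real code would issue a pause/yield hint.
  for (volatile std::uint32_t i = 0; i < b.step * b.min_tick; i = i + 1)
    ;
  if (b.step < b.max_backoff)
    b.step <<= 1; // widen the window for the next contended attempt
}
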