Lines Matching defs:thr

228 static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
229 MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
232 SlotLocker locker(thr);
234 auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
237 thr->clock.ReleaseAcquire(&s->clock);
239 thr->clock.Release(&s->clock);
241 thr->clock.Acquire(s->clock);
245 IncrementEpoch(thr);
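
These lines are the skeleton of the generic AtomicRMW helper (this listing appears to come from ThreadSanitizer's atomic instrumentation): the operation is first reported to the race detector as an atomic write, then, under the slot lock, the thread's vector clock is combined with the clock of the sync object for that address according to the memory order, and the slot epoch is bumped afterwards. A minimal sketch of the surrounding control flow, reconstructed from the matched lines only; the functor parameter F, the relaxed fast path, the order-classification helpers, and the omitted sync-object mutex are assumptions rather than quoted code:

  template <typename T, T (*F)(volatile T *a, T v)>
  static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
    // Report the operation as an atomic write for race detection.
    MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
    if (mo == mo_relaxed)  // assumed fast path: relaxed RMW does no clock work
      return F(a, v);
    SlotLocker locker(thr);
    {
      // Find or create the sync object that tracks this address.
      auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
      // Combine vector clocks according to the requested memory order
      // (the Is*Order helper names are assumed; the clock calls are quoted).
      if (IsAcqRelOrder(mo))
        thr->clock.ReleaseAcquire(&s->clock);
      else if (IsReleaseOrder(mo))
        thr->clock.Release(&s->clock);
      else if (IsAcquireOrder(mo))
        thr->clock.Acquire(s->clock);
      v = F(a, v);  // perform the actual read-modify-write
    }
    IncrementEpoch(thr);
    return v;
  }
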
263 static T Atomic(ThreadState *thr, uptr pc, morder mo, const volatile T *a) {
268 MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
277 SlotLocker locker(thr);
279 thr->clock.Acquire(s->clock);
284 MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
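
The load flavor reports a read access instead of a write and only acquires from a sync object that already exists; notably, no GetSyncOrCreate line matched here, and the two MemoryAccess calls (lines 268 and 284) belong to the non-acquire fast path and the acquire path respectively. A rough sketch, with the unmatched pieces (the access flags, the raw uninstrumented load, the sync-object lookup) marked as assumptions:

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, const volatile T *a) {
    if (!IsAcquireOrder(mo)) {  // assumed: non-acquire loads take a fast path
      MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                   kAccessRead | kAccessAtomic);
      return NoTsanAtomic(mo, a);  // assumed name for the raw atomic load
    }
    T v = NoTsanAtomic(mo, a);
    // Assumed: acquire only from an existing sync object, so repeatedly
    // polling an atomic that was never released does not allocate metadata.
    if (auto s = ctx->metamap.GetSyncIfExists((uptr)a)) {
      SlotLocker locker(thr);
      thr->clock.Acquire(s->clock);
      v = NoTsanAtomic(mo, a);  // re-read under the lock for a consistent value
    }
    MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                 kAccessRead | kAccessAtomic);
    return v;
  }
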
304 static void Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
306 MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
316 SlotLocker locker(thr);
318 auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
320 thr->clock.ReleaseStore(&s->clock);
323 IncrementEpoch(thr);
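
The store flavor is simpler: for a release store the thread's clock replaces the sync object's clock via ReleaseStore (rather than merging into it, as ReleaseAcquire/Release do on the RMW path), and the epoch is bumped afterwards. Sketch, with the unmatched parts (access flags, the raw store, the non-release fast path) treated as assumptions:

  template <typename T>
  static void Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
    MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                 kAccessWrite | kAccessAtomic);
    if (!IsReleaseOrder(mo)) {  // assumed fast path for non-release stores
      NoTsanAtomic(mo, a, v);   // assumed name for the raw atomic store
      return;
    }
    SlotLocker locker(thr);
    {
      auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
      thr->clock.ReleaseStore(&s->clock);  // overwrite the sync clock with ours
      NoTsanAtomic(mo, a, v);
    }
    IncrementEpoch(thr);
  }
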
333 static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
334 return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
345 static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
346 return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
357 static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
358 return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
369 static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
370 return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
381 static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
382 return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
393 static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
394 return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
405 static T Atomic(ThreadState *thr, uptr pc, morder mo, volatile T *a, T v) {
406 return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
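
Each exchange/fetch_* flavor above is a one-line wrapper that plugs a per-operation functor (func_xchg, func_add, func_sub, func_and, func_or, func_xor, func_nand) into the same AtomicRMW path, so they all take part in release/acquire synchronization identically. As a user-level illustration (ordinary C++ test code, not runtime code): under ThreadSanitizer a release RMW paired with an acquire load forms a happens-before edge, so the plain write to data below is not reported as a race.

  #include <atomic>
  #include <cassert>
  #include <thread>

  int data = 0;                      // plain, non-atomic payload
  std::atomic<unsigned> flags{0};

  int main() {
    std::thread producer([] {
      data = 42;                                     // plain write
      flags.fetch_or(1, std::memory_order_release);  // release RMW publishes it
    });
    std::thread consumer([] {
      while (!(flags.load(std::memory_order_acquire) & 1)) {
        // spin until the producer sets the flag
      }
      assert(data == 42);  // acquire load synchronizes with the fetch_or
    });
    producer.join();
    consumer.join();
    return 0;
  }
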
435 static bool Atomic(ThreadState *thr, uptr pc, morder mo, morder fmo,
442 MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
452 SlotLocker locker(thr);
456 auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
466 thr->clock.ReleaseAcquire(&s->clock);
468 thr->clock.Release(&s->clock);
470 thr->clock.Acquire(s->clock);
473 IncrementEpoch(thr);
478 static T Atomic(ThreadState *thr, uptr pc, morder mo, morder fmo,
480 Atomic(thr, pc, mo, fmo, a, &c, v);
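
Compare-and-swap follows the same clock protocol as AtomicRMW (lines 466-470 mirror 237-241), except that it takes both a success order mo and a failure order fmo. The value-returning overload at line 478 just forwards to the bool-returning one and hands back the observed value; a sketch (the trailing return is not among the matched lines, so it is inferred):

  template <typename T>
  static T Atomic(ThreadState *thr, uptr pc, morder mo, morder fmo,
                  volatile T *a, T c, T v) {
    // The bool overload updates c in place with the value observed at *a
    // when the exchange fails; return it either way.
    Atomic(thr, pc, mo, fmo, a, &c, v);
    return c;
  }
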
489 static void Atomic(ThreadState *thr, uptr pc, morder mo) {
525 ThreadState *const thr = cur_thread();
526 ProcessPendingSignals(thr);
527 if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors))
529 return Op::Atomic(thr, GET_CALLER_PC(), convert_morder(mo), args...);
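
Line 489 is the fence flavor (no address, only a memory order), and lines 525-529 are the common dispatcher that every public C entry point funnels through: it grabs the current thread state, drains pending signals, bails out to the uninstrumented operation when synchronization is being ignored, and otherwise invokes the selected Op with the caller PC and a converted memory order. Sketch, with the wrapper's name and the early-return branch filled in as assumptions:

  template <class Op, class... Types>
  static auto AtomicImpl(morder mo, Types... args) {  // name assumed
    ThreadState *const thr = cur_thread();
    ProcessPendingSignals(thr);
    if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors))
      return Op::NoTsanAtomic(mo, args...);  // assumed uninstrumented fallback
    return Op::Atomic(thr, GET_CALLER_PC(), convert_morder(mo), args...);
  }
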
886 void AtomicGo(ThreadState *thr, uptr cpc, uptr pc, Types... args) {
887 if (thr->ignore_sync) {
890 FuncEntry(thr, cpc);
891 (void)Op::Atomic(thr, pc, args...);
892 FuncExit(thr);
897 auto AtomicGoRet(ThreadState *thr, uptr cpc, uptr pc, Types... args) {
898 if (thr->ignore_sync) {
901 FuncEntry(thr, cpc);
902 auto ret = Op::Atomic(thr, pc, args...);
903 FuncExit(thr);
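
The Go runtime hooks below do not use that dispatcher; instead AtomicGo and AtomicGoRet bracket the operation with FuncEntry/FuncExit so it appears in Go stack traces, and they skip instrumentation entirely when ignore_sync is set. A sketch of the value-returning helper, with the body of the ignore_sync branch (not among the matched lines) filled in as an assumption:

  template <class Op, class... Types>
  static auto AtomicGoRet(ThreadState *thr, uptr cpc, uptr pc, Types... args) {
    if (thr->ignore_sync) {
      return Op::NoTsanAtomic(args...);  // assumed: bypass instrumentation
    } else {
      FuncEntry(thr, cpc);  // make the atomic call visible in the Go trace
      auto ret = Op::Atomic(thr, pc, args...);
      FuncExit(thr);
      return ret;
    }
  }
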
910 void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
911 *(a32 *)(a + 8) = AtomicGoRet<OpLoad>(thr, cpc, pc, mo_acquire, *(a32 **)a);
915 void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
916 *(a64 *)(a + 8) = AtomicGoRet<OpLoad>(thr, cpc, pc, mo_acquire, *(a64 **)a);
920 void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
921 AtomicGo<OpStore>(thr, cpc, pc, mo_release, *(a32 **)a, *(a32 *)(a + 8));
925 void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
926 AtomicGo<OpStore>(thr, cpc, pc, mo_release, *(a64 **)a, *(a64 *)(a + 8));
930 void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
931 *(a32 *)(a + 16) = AtomicGoRet<OpFetchAdd>(thr, cpc, pc, mo_acq_rel,
936 void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
937 *(a64 *)(a + 16) = AtomicGoRet<OpFetchAdd>(thr, cpc, pc, mo_acq_rel,
942 void __tsan_go_atomic32_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
943 *(a32 *)(a + 16) = AtomicGoRet<OpFetchAnd>(thr, cpc, pc, mo_acq_rel,
948 void __tsan_go_atomic64_fetch_and(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
949 *(a64 *)(a + 16) = AtomicGoRet<OpFetchAnd>(thr, cpc, pc, mo_acq_rel,
954 void __tsan_go_atomic32_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
955 *(a32 *)(a + 16) = AtomicGoRet<OpFetchOr>(thr, cpc, pc, mo_acq_rel,
960 void __tsan_go_atomic64_fetch_or(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
961 *(a64 *)(a + 16) = AtomicGoRet<OpFetchOr>(thr, cpc, pc, mo_acq_rel,
966 void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
967 *(a32 *)(a + 16) = AtomicGoRet<OpExchange>(thr, cpc, pc, mo_acq_rel,
972 void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
973 *(a64 *)(a + 16) = AtomicGoRet<OpExchange>(thr, cpc, pc, mo_acq_rel,
978 void __tsan_go_atomic32_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
981 a32 cur = AtomicGoRet<OpCAS>(thr, cpc, pc, mo_acq_rel, mo_acquire, *(a32 **)a,
987 void __tsan_go_atomic64_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
990 a64 cur = AtomicGoRet<OpCAS>(thr, cpc, pc, mo_acq_rel, mo_acquire, *(a64 **)a,
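
Each Go entry point receives a single packed argument record a (a u8*) and decodes it with raw casts; the layout visible in the matched lines is:

  // Packed Go argument record, as decoded by the casts above:
  //   a + 0  : pointer to the atomic variable   (*(a32 **)a or *(a64 **)a)
  //   a + 8  : the store operand, or the slot a plain load's result is written to
  //   a + 16 : the slot where fetch_add/and/or and exchange write the old value
  // The remaining operands and the compare_exchange result layout continue on
  // source lines that did not match defs:thr, so they are not shown here.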