//===-- tsan_sync.h ---------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_SYNC_H
#define TSAN_SYNC_H

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "tsan_defs.h"
#include "tsan_clock.h"
#include "tsan_mutex.h"
#include "tsan_dense_alloc.h"

namespace __tsan {

// These need to match __tsan_mutex_* flags defined in tsan_interface.h.
// See documentation there as well.
enum MutexFlags {
  MutexFlagLinkerInit          = 1 << 0,  // __tsan_mutex_linker_init
  MutexFlagWriteReentrant      = 1 << 1,  // __tsan_mutex_write_reentrant
  MutexFlagReadReentrant       = 1 << 2,  // __tsan_mutex_read_reentrant
  MutexFlagReadLock            = 1 << 3,  // __tsan_mutex_read_lock
  MutexFlagTryLock             = 1 << 4,  // __tsan_mutex_try_lock
  MutexFlagTryLockFailed       = 1 << 5,  // __tsan_mutex_try_lock_failed
  MutexFlagRecursiveLock       = 1 << 6,  // __tsan_mutex_recursive_lock
  MutexFlagRecursiveUnlock     = 1 << 7,  // __tsan_mutex_recursive_unlock
  MutexFlagNotStatic           = 1 << 8,  // __tsan_mutex_not_static

  // The following flags are runtime private.
  // Mutex API misuse was detected, so don't report any more.
  MutexFlagBroken              = 1 << 30,
  // We did not intercept the pre-lock event, so handle it on post-lock.
  MutexFlagDoPreLockOnPostLock = 1 << 29,
  // Must list all mutex creation flags.
  MutexCreationFlagMask        = MutexFlagLinkerInit |
                                 MutexFlagWriteReentrant |
                                 MutexFlagReadReentrant |
                                 MutexFlagNotStatic,
};

struct SyncVar {
  SyncVar();

  static const int kInvalidTid = -1;

  uptr addr;  // overwritten by DenseSlabAlloc freelist
  Mutex mtx;
  u64 uid;  // Globally unique id.
  u32 creation_stack_id;
  int owner_tid;  // Set only by exclusive owners.
  u64 last_lock;
  int recursion;
  atomic_uint32_t flags;
  u32 next;  // in MetaMap
  DDMutex dd;
  SyncClock read_clock;  // Used for rw mutexes only.
  // The clock is placed last, so that it ends up on a different cache line
  // than the mtx. This reduces contention for hot sync objects.
  SyncClock clock;

  void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid);
  void Reset(Processor *proc);

  u64 GetId() const {
    // 48 lsb is addr, then 14 bits is low part of uid, then 2 zero bits.
    return GetLsb((u64)addr | (uid << 48), 60);
  }
  bool CheckId(u64 uid) const {
    CHECK_EQ(uid, GetLsb(uid, 14));
    return GetLsb(this->uid, 14) == uid;
  }
  static uptr SplitId(u64 id, u64 *uid) {
    *uid = id >> 48;
    return (uptr)GetLsb(id, 48);
  }

  bool IsFlagSet(u32 f) const {
    return atomic_load_relaxed(&flags) & f;
  }

  void SetFlags(u32 f) {
    atomic_store_relaxed(&flags, atomic_load_relaxed(&flags) | f);
  }

  void UpdateFlags(u32 flagz) {
    // Filter out operation flags.
    if (!(flagz & MutexCreationFlagMask))
      return;
    u32 current = atomic_load_relaxed(&flags);
    if (current & MutexCreationFlagMask)
      return;
    // Note: this can be called from MutexPostReadLock, which holds only a read
    // lock on the SyncVar.
    atomic_store_relaxed(&flags, current | (flagz & MutexCreationFlagMask));
  }
};
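
// Editor's sketch (not upstream code): how the id produced by SyncVar::GetId()
// round-trips through SplitId()/CheckId(). The concrete addr/uid values are
// hypothetical; a small uid and a 48-bit address are assumed so that nothing
// is truncated by GetLsb():
//
//   SyncVar *s = ...;            // s->addr == 0x00007f0012345678
//                                // s->uid  == 0x2a
//   u64 id = s->GetId();         // 0x002a7f0012345678: addr in bits 0..47,
//                                // low bits of uid packed above it
//   u64 uid;
//   uptr addr = SyncVar::SplitId(id, &uid);
//   // addr == s->addr, uid == 0x2a
//   CHECK(s->CheckId(uid));      // compares only the low 14 bits of uid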
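
// Editor's usage sketch (not upstream code) for the MetaMap class declared
// below: the runtime keeps a single MetaMap in the global Context
// (ctx->metamap), and the mutex event handlers in tsan_rtl_mutex.cc typically
// fetch a locked SyncVar from it, update it, and release its internal mutex,
// roughly:
//
//   SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr,
//                                                /*write_lock=*/true);
//   s->UpdateFlags(flagz);
//   ... record the lock in s->clock / s->owner_tid / s->recursion ...
//   s->mtx.Unlock();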

/* MetaMap maps arbitrary user pointers onto various descriptors.
   Currently it maps pointers to heap block descriptors and sync var
   descriptors. It uses 1/2 of the direct shadow, see tsan_platform.h.
*/
class MetaMap {
 public:
  MetaMap();

  void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz);
  uptr FreeBlock(Processor *proc, uptr p);
  bool FreeRange(Processor *proc, uptr p, uptr sz);
  void ResetRange(Processor *proc, uptr p, uptr sz);
  MBlock* GetBlock(uptr p);

  SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,
                              uptr addr, bool write_lock);
  SyncVar* GetIfExistsAndLock(uptr addr, bool write_lock);

  void MoveMemory(uptr src, uptr dst, uptr sz);

  void OnProcIdle(Processor *proc);

 private:
  static const u32 kFlagMask  = 3u << 30;
  static const u32 kFlagBlock = 1u << 30;
  static const u32 kFlagSync  = 2u << 30;
  typedef DenseSlabAlloc<MBlock, 1<<16, 1<<12> BlockAlloc;
  typedef DenseSlabAlloc<SyncVar, 1<<16, 1<<10> SyncAlloc;
  BlockAlloc block_alloc_;
  SyncAlloc sync_alloc_;
  atomic_uint64_t uid_gen_;

  SyncVar* GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock,
                      bool create);
};

}  // namespace __tsan

#endif  // TSAN_SYNC_H