xref: /freebsd-src/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.h (revision 0fca6ea1d4eea4c934cfff25ac9ee8ad6fe95583)
10b57cec5SDimitry Andric //===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
20b57cec5SDimitry Andric //
30b57cec5SDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
40b57cec5SDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
50b57cec5SDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
60b57cec5SDimitry Andric //
70b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
80b57cec5SDimitry Andric //
90b57cec5SDimitry Andric // This file is a part of ThreadSanitizer (TSan), a race detector.
100b57cec5SDimitry Andric //
110b57cec5SDimitry Andric // Main internal TSan header file.
120b57cec5SDimitry Andric //
130b57cec5SDimitry Andric // Ground rules:
140b57cec5SDimitry Andric //   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
150b57cec5SDimitry Andric //     function-scope locals)
160b57cec5SDimitry Andric //   - All functions/classes/etc reside in namespace __tsan, except for those
170b57cec5SDimitry Andric //     declared in tsan_interface.h.
180b57cec5SDimitry Andric //   - Platform-specific files should be used instead of ifdefs (*).
190b57cec5SDimitry Andric //   - No system headers included in header files (*).
//   - Platform-specific headers included only into platform-specific files (*).
210b57cec5SDimitry Andric //
220b57cec5SDimitry Andric //  (*) Except when inlining is critical for performance.
230b57cec5SDimitry Andric //===----------------------------------------------------------------------===//
240b57cec5SDimitry Andric 
250b57cec5SDimitry Andric #ifndef TSAN_RTL_H
260b57cec5SDimitry Andric #define TSAN_RTL_H
270b57cec5SDimitry Andric 
280b57cec5SDimitry Andric #include "sanitizer_common/sanitizer_allocator.h"
290b57cec5SDimitry Andric #include "sanitizer_common/sanitizer_allocator_internal.h"
300b57cec5SDimitry Andric #include "sanitizer_common/sanitizer_asm.h"
310b57cec5SDimitry Andric #include "sanitizer_common/sanitizer_common.h"
320b57cec5SDimitry Andric #include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
330b57cec5SDimitry Andric #include "sanitizer_common/sanitizer_libignore.h"
340b57cec5SDimitry Andric #include "sanitizer_common/sanitizer_suppressions.h"
350b57cec5SDimitry Andric #include "sanitizer_common/sanitizer_thread_registry.h"
360b57cec5SDimitry Andric #include "sanitizer_common/sanitizer_vector.h"
370b57cec5SDimitry Andric #include "tsan_defs.h"
380b57cec5SDimitry Andric #include "tsan_flags.h"
39349cc55cSDimitry Andric #include "tsan_ignoreset.h"
400eae32dcSDimitry Andric #include "tsan_ilist.h"
410b57cec5SDimitry Andric #include "tsan_mman.h"
42349cc55cSDimitry Andric #include "tsan_mutexset.h"
43349cc55cSDimitry Andric #include "tsan_platform.h"
44349cc55cSDimitry Andric #include "tsan_report.h"
45349cc55cSDimitry Andric #include "tsan_shadow.h"
46349cc55cSDimitry Andric #include "tsan_stack_trace.h"
470b57cec5SDimitry Andric #include "tsan_sync.h"
480b57cec5SDimitry Andric #include "tsan_trace.h"
490eae32dcSDimitry Andric #include "tsan_vector_clock.h"
500b57cec5SDimitry Andric 
510b57cec5SDimitry Andric #if SANITIZER_WORDSIZE != 64
520b57cec5SDimitry Andric # error "ThreadSanitizer is supported only on 64-bit platforms"
530b57cec5SDimitry Andric #endif
540b57cec5SDimitry Andric 
550b57cec5SDimitry Andric namespace __tsan {
560b57cec5SDimitry Andric 
570b57cec5SDimitry Andric #if !SANITIZER_GO
580b57cec5SDimitry Andric struct MapUnmapCallback;
59bdd1243dSDimitry Andric #  if defined(__mips64) || defined(__aarch64__) || defined(__loongarch__) || \
605f757f3fSDimitry Andric       defined(__powerpc__) || SANITIZER_RISCV64
610b57cec5SDimitry Andric 
// Parameters for the 32-bit-style primary allocator, used on targets with
// constrained or fragmented address spaces (mips64, aarch64, loongarch,
// powerpc, riscv64 — see the #if above).
struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;  // no per-chunk metadata
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;  // 1 MB regions
  using AddressSpaceView = LocalAddressSpaceView;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
730b57cec5SDimitry Andric #else
struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
  // s390x uses its own address-space mapping; everything else uses the
  // 48-bit layout.
#    if defined(__s390x__)
  typedef MappingS390x Mapping;
#    else
  typedef Mapping48AddressSpace Mapping;
#    endif
  // The allocator owns the platform heap range described by the mapping.
  static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
  static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
  static const uptr kMetadataSize = 0;  // no per-chunk metadata
  typedef DefaultSizeClassMap SizeClassMap;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
890b57cec5SDimitry Andric #endif
900b57cec5SDimitry Andric typedef CombinedAllocator<PrimaryAllocator> Allocator;
910b57cec5SDimitry Andric typedef Allocator::AllocatorCache AllocatorCache;
920b57cec5SDimitry Andric Allocator *allocator();
930b57cec5SDimitry Andric #endif
940b57cec5SDimitry Andric 
950b57cec5SDimitry Andric struct ThreadSignalContext;
960b57cec5SDimitry Andric 
// Per-setjmp snapshot of thread state (stored in ThreadState::jmp_bufs).
// The last three fields mirror the ThreadState members of the same names,
// presumably so they can be restored when a longjmp unwinds — confirm at
// the interceptor use sites.
struct JmpBuf {
  uptr sp;  // stack pointer at setjmp time
  int int_signal_send;
  bool in_blocking_func;
  uptr in_signal_handler;
  uptr *shadow_stack_pos;  // shadow-stack top to roll back to
};
1040b57cec5SDimitry Andric 
1050b57cec5SDimitry Andric // A Processor represents a physical thread, or a P for Go.
1060b57cec5SDimitry Andric // It is used to store internal resources like allocate cache, and does not
1070b57cec5SDimitry Andric // participate in race-detection logic (invisible to end user).
1080b57cec5SDimitry Andric // In C++ it is tied to an OS thread just like ThreadState, however ideally
1090b57cec5SDimitry Andric // it should be tied to a CPU (this way we will have fewer allocator caches).
1100b57cec5SDimitry Andric // In Go it is tied to a P, so there are significantly fewer Processor's than
1110b57cec5SDimitry Andric // ThreadState's (which are tied to Gs).
1120b57cec5SDimitry Andric // A ThreadState must be wired with a Processor to handle events.
struct Processor {
  ThreadState *thr; // currently wired thread, or nullptr
#if !SANITIZER_GO
  // Per-processor caches for the user and internal allocators
  // (not needed in Go, which does its own allocation).
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
#endif
  // Caches for the dense slab allocators backing heap-block and sync metadata.
  DenseSlabAllocCache block_cache;
  DenseSlabAllocCache sync_cache;
  DDPhysicalThread *dd_pt;  // deadlock-detector per-physical-thread state
};
1230b57cec5SDimitry Andric 
1240b57cec5SDimitry Andric #if !SANITIZER_GO
1250b57cec5SDimitry Andric // ScopedGlobalProcessor temporary setups a global processor for the current
1260b57cec5SDimitry Andric // thread, if it does not have one. Intended for interceptors that can run
1270b57cec5SDimitry Andric // at the very thread end, when we already destroyed the thread processor.
// RAII guard: see the comment above. Constructor wires the global processor
// if the current thread has none; destructor undoes it.
struct ScopedGlobalProcessor {
  ScopedGlobalProcessor();
  ~ScopedGlobalProcessor();
};
1320b57cec5SDimitry Andric #endif
1330b57cec5SDimitry Andric 
// A (thread id, epoch) pair; journaled in TidSlot to record slot occupancy.
struct TidEpoch {
  Tid tid;
  Epoch epoch;
};
1380eae32dcSDimitry Andric 
// A slot associates a Sid with its current epoch and (optionally) a running
// thread. Cache-line aligned to avoid false sharing between slots
// (Context holds an array of them).
struct alignas(SANITIZER_CACHE_LINE_SIZE) TidSlot {
  Mutex mtx;
  Sid sid;
  // Epoch stored as a raw atomic u32; use epoch()/SetEpoch() accessors.
  atomic_uint32_t raw_epoch;
  ThreadState *thr;  // thread currently attached to this slot, or null
  // NOTE(review): presumably the tid/epoch history of slot occupants
  // (see TidEpoch) — confirm against the .cpp.
  Vector<TidEpoch> journal;
  INode node;  // intrusive-list hook (used by Context::slot_queue)

  // Both accessors use relaxed atomics; synchronization of epoch updates
  // is provided elsewhere (see Context::slot_mtx and mtx).
  Epoch epoch() const {
    return static_cast<Epoch>(atomic_load(&raw_epoch, memory_order_relaxed));
  }

  void SetEpoch(Epoch v) {
    atomic_store(&raw_epoch, static_cast<u32>(v), memory_order_relaxed);
  }

  TidSlot();
};
1570eae32dcSDimitry Andric 
1580b57cec5SDimitry Andric // This struct is stored in TLS.
// This struct is stored in TLS.
// Cache-line aligned; the fields touched on every memory access are grouped
// at the top so they share a cache line.
struct alignas(SANITIZER_CACHE_LINE_SIZE) ThreadState {
  // Fast-path state; fast_state.GetIgnoreBit() is read on every access
  // (see the slow-path comment below).
  FastState fast_state;
  int ignore_sync;  // nesting counter; see ThreadIgnoreSyncBegin/End
#if !SANITIZER_GO
  int ignore_interceptors;  // nesting counter; see ScopedIgnoreInterceptors
#endif
  uptr *shadow_stack_pos;  // top of the shadow call stack

  // Current position in tctx->trace.Back()->events (Event*).
  atomic_uintptr_t trace_pos;
  // PC of the last memory access, used to compute PC deltas in the trace.
  uptr trace_prev_pc;

  // Technically `current` should be a separate THREADLOCAL variable;
  // but it is placed here in order to share cache line with previous fields.
  ThreadState* current;

  atomic_sint32_t pending_signals;

  VectorClock clock;

  // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  int suppress_reports;
  // Go does not support ignores.
#if !SANITIZER_GO
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;
#endif
  // [shadow_stack, shadow_stack_end) is the allocated shadow-stack region;
  // shadow_stack_pos above is the current top.
  uptr *shadow_stack;
  uptr *shadow_stack_end;
#if !SANITIZER_GO
  Vector<JmpBuf> jmp_bufs;  // setjmp snapshots; see JmpBuf
  int in_symbolizer;
  atomic_uintptr_t in_blocking_func;
  bool in_ignored_lib;
  bool is_inited;
#endif
  MutexSet mset;
  bool is_dead;
  const Tid tid;  // immutable after construction
  // Stack and TLS ranges of the underlying OS thread.
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
  ThreadContext *tctx;  // persistent per-thread context in the registry

  DDLogicalThread *dd_lt;  // deadlock-detector per-logical-thread state

  // Slot assignment; see TidSlot.
  TidSlot *slot;
  uptr slot_epoch;
  bool slot_locked;

  // Current wired Processor, or nullptr. Required to handle any events.
  Processor *proc1;
#if !SANITIZER_GO
  Processor *proc() { return proc1; }
#else
  Processor *proc();
#endif

  atomic_uintptr_t in_signal_handler;
  atomic_uintptr_t signal_ctx;

#if !SANITIZER_GO
  // State captured at the last pthread_cond-style sleep point — TODO confirm
  // exact semantics at the use sites.
  StackID last_sleep_stack_id;
  VectorClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  const ReportDesc *current_report;

  explicit ThreadState(Tid tid);
};
2380b57cec5SDimitry Andric 
2390b57cec5SDimitry Andric #if !SANITIZER_GO
24081ad6265SDimitry Andric #if SANITIZER_APPLE || SANITIZER_ANDROID
// On Apple/Android the current-thread accessors are defined out-of-line in
// platform-specific files (presumably because the TLS-placeholder scheme
// below is unavailable there — confirm in tsan_platform_*).
ThreadState *cur_thread();
void set_cur_thread(ThreadState *thr);
void cur_thread_finalize();
inline ThreadState *cur_thread_init() { return cur_thread(); }
2450b57cec5SDimitry Andric #  else
// Generic scheme: a THREADLOCAL byte buffer doubles as ThreadState storage,
// and its `current` field points at the active ThreadState.
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
// Returns the current thread's ThreadState via the placeholder's
// `current` pointer.
inline ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
}
// Like cur_thread(), but on first use makes the placeholder point at itself.
inline ThreadState *cur_thread_init() {
  ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
  if (UNLIKELY(!thr->current))
    thr->current = thr;
  return thr->current;
}
// Redirects cur_thread() for this OS thread to `thr`.
inline void set_cur_thread(ThreadState *thr) {
  reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
}
// Nothing to tear down in the TLS-placeholder scheme.
inline void cur_thread_finalize() { }
26181ad6265SDimitry Andric #  endif  // SANITIZER_APPLE || SANITIZER_ANDROID
2620b57cec5SDimitry Andric #endif  // SANITIZER_GO
2630b57cec5SDimitry Andric 
// Per-thread context registered in Context::thread_registry; unlike
// ThreadState (TLS), it persists across the thread's lifecycle events.
class ThreadContext final : public ThreadContextBase {
 public:
  explicit ThreadContext(Tid tid);
  ~ThreadContext();
  ThreadState *thr;  // live thread state, or null when not running
  StackID creation_stack_id;  // stack at the point the thread was created
  VectorClock *sync;
  uptr sync_epoch;
  Trace trace;  // the thread's event trace

  // Override superclass callbacks.
  void OnDead() override;
  void OnJoined(void *arg) override;
  void OnFinished() override;
  void OnStarted(void *arg) override;
  void OnCreated(void *arg) override;
  void OnReset() override;
  void OnDetached(void *arg) override;
};
2830b57cec5SDimitry Andric 
// Pair of stack hashes identifying a reported race; stored in
// Context::racy_stacks for report deduplication.
struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const;
};
2880b57cec5SDimitry Andric 
// Inclusive address range of a racy access.
struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};
2930b57cec5SDimitry Andric 
// Record of a suppression that already matched a report
// (stored in Context::fired_suppressions).
struct FiredSuppression {
  ReportType type;
  uptr pc_or_addr;  // PC or data address the suppression matched on
  Suppression *supp;
};
2990b57cec5SDimitry Andric 
// Global runtime state. There is exactly one instance, pointed to by `ctx`.
struct Context {
  Context();

  bool initialized;
#if !SANITIZER_GO
  bool after_multithreaded_fork;
#endif

  MetaMap metamap;

  Mutex report_mtx;  // held while producing a report
  int nreported;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry thread_registry;

  // This is used to prevent a very unlikely but very pathological behavior.
  // Since memory access handling is not synchronized with DoReset,
  // a thread running concurrently with DoReset can leave a bogus shadow value
  // that will be later falsely detected as a race. For such false races
  // RestoreStack will return false and we will not report it.
  // However, consider that a thread leaves a whole lot of such bogus values
  // and these values are later read by a whole lot of threads.
  // This will cause massive amounts of ReportRace calls and lots of
  // serialization. In very pathological cases the resulting slowdown
  // can be >100x. This is very unlikely, but it was presumably observed
  // in practice: https://github.com/google/sanitizers/issues/1552
  // If this happens, previous access sid+epoch will be the same for all of
  // these false races b/c if the thread will try to increment epoch, it will
  // notice that DoReset has happened and will stop producing bogus shadow
  // values. So, last_spurious_race is used to remember the last sid+epoch
  // for which RestoreStack returned false. Then it is used to filter out
  // races with the same sid+epoch very early and quickly.
  // It is of course possible that multiple threads left multiple bogus shadow
  // values and all of them are read by lots of threads at the same time.
  // In such case last_spurious_race will only be able to deduplicate a few
  // races from one thread, then few from another and so on. An alternative
  // would be to hold an array of such sid+epoch, but we consider such scenario
  // as even less likely.
  // Note: this can lead to some rare false negatives as well:
  // 1. When a legit access with the same sid+epoch participates in a race
  // as the "previous" memory access, it will be wrongly filtered out.
  // 2. When RestoreStack returns false for a legit memory access because it
  // was already evicted from the thread trace, we will still remember it in
  // last_spurious_race. Then if there is another racing memory access from
  // the same thread that happened in the same epoch, but was stored in the
  // next thread trace part (which is still preserved in the thread trace),
  // we will also wrongly filter it out while RestoreStack would actually
  // succeed for that second memory access.
  RawShadow last_spurious_race;

  // Report-deduplication state; see RacyStacks/FiredSuppression.
  Mutex racy_mtx;
  Vector<RacyStacks> racy_stacks;
  // Number of fired suppressions may be large enough.
  Mutex fired_suppressions_mtx;
  InternalMmapVector<FiredSuppression> fired_suppressions;
  DDetector *dd;  // deadlock detector

  Flags flags;
  fd_t memprof_fd;

  // The last slot index (kFreeSid) is used to denote freed memory.
  TidSlot slots[kThreadSlotCount - 1];

  // Protects global_epoch, slot_queue, trace_part_recycle.
  Mutex slot_mtx;
  uptr global_epoch;  // guarded by slot_mtx and by all slot mutexes
  bool resetting;     // global reset is in progress
  IList<TidSlot, &TidSlot::node> slot_queue SANITIZER_GUARDED_BY(slot_mtx);
  IList<TraceHeader, &TraceHeader::global, TracePart> trace_part_recycle
      SANITIZER_GUARDED_BY(slot_mtx);
  // Trace-part accounting, all guarded by slot_mtx.
  uptr trace_part_total_allocated SANITIZER_GUARDED_BY(slot_mtx);
  uptr trace_part_recycle_finished SANITIZER_GUARDED_BY(slot_mtx);
  uptr trace_part_finished_excess SANITIZER_GUARDED_BY(slot_mtx);
#if SANITIZER_GO
  uptr mapped_shadow_begin;
  uptr mapped_shadow_end;
#endif
};
3820b57cec5SDimitry Andric 
3830b57cec5SDimitry Andric extern Context *ctx;  // The one and the only global runtime context.
3840b57cec5SDimitry Andric 
// Accessor for the global runtime flags stored in the global context.
ALWAYS_INLINE Flags *flags() {
  return &ctx->flags;
}
3880b57cec5SDimitry Andric 
// RAII guard that increments the current thread's ignore_interceptors
// counter for its lifetime. No-op in the Go build, which has no
// interceptors.
struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
    cur_thread()->ignore_interceptors++;
#endif
  }

  ~ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
    cur_thread()->ignore_interceptors--;
#endif
  }
};
4020b57cec5SDimitry Andric 
4030b57cec5SDimitry Andric const char *GetObjectTypeFromTag(uptr tag);
4040b57cec5SDimitry Andric const char *GetReportHeaderFromTag(uptr tag);
4050b57cec5SDimitry Andric uptr TagFromShadowStackFrame(uptr pc);
4060b57cec5SDimitry Andric 
// Builder for a race/error report: accumulates memory accesses, stacks,
// threads, mutexes and locations into a ReportDesc. Non-copyable; report
// lifetime is tied to the object (see ~ScopedReportBase).
class ScopedReportBase {
 public:
  void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, Tid tid,
                       StackTrace stack, const MutexSet *mset);
  void AddStack(StackTrace stack, bool suppressable = false);
  void AddThread(const ThreadContext *tctx, bool suppressable = false);
  void AddThread(Tid tid, bool suppressable = false);
  void AddUniqueTid(Tid unique_tid);
  int AddMutex(uptr addr, StackID creation_stack_id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(StackID stack_id);
  void SetCount(int count);
  void SetSigNum(int sig);

  const ReportDesc *GetReport() const;

 protected:
  ScopedReportBase(ReportType typ, uptr tag);
  ~ScopedReportBase();

 private:
  ReportDesc *rep_;
  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  ScopedReportBase(const ScopedReportBase &) = delete;
  void operator=(const ScopedReportBase &) = delete;
};
4360b57cec5SDimitry Andric 
// ScopedReportBase plus the global error-report lock, so only one report
// is produced at a time.
class ScopedReport : public ScopedReportBase {
 public:
  explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone);
  ~ScopedReport();

 private:
  ScopedErrorReportLock lock_;
};
4450b57cec5SDimitry Andric 
446fe6060f1SDimitry Andric bool ShouldReport(ThreadState *thr, ReportType typ);
4470b57cec5SDimitry Andric ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
4480b57cec5SDimitry Andric 
4490b57cec5SDimitry Andric // The stack could look like:
4500b57cec5SDimitry Andric //   <start> | <main> | <foo> | tag | <bar>
4510b57cec5SDimitry Andric // This will extract the tag and keep:
4520b57cec5SDimitry Andric //   <start> | <main> | <foo> | <bar>
4530b57cec5SDimitry Andric template<typename StackTraceTy>
4540b57cec5SDimitry Andric void ExtractTagFromStack(StackTraceTy *stack, uptr *tag = nullptr) {
4550b57cec5SDimitry Andric   if (stack->size < 2) return;
4560b57cec5SDimitry Andric   uptr possible_tag_pc = stack->trace[stack->size - 2];
4570b57cec5SDimitry Andric   uptr possible_tag = TagFromShadowStackFrame(possible_tag_pc);
4580b57cec5SDimitry Andric   if (possible_tag == kExternalTagNone) return;
4590b57cec5SDimitry Andric   stack->trace_buffer[stack->size - 2] = stack->trace_buffer[stack->size - 1];
4600b57cec5SDimitry Andric   stack->size -= 1;
4610b57cec5SDimitry Andric   if (tag) *tag = possible_tag;
4620b57cec5SDimitry Andric }
4630b57cec5SDimitry Andric 
// Captures the current shadow call stack into `stack`, optionally topped
// with `toppc`, keeping at most kStackTraceMax innermost frames; then
// strips the external tag frame if present (see ExtractTagFromStack).
template<typename StackTraceTy>
void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
                        uptr *tag = nullptr) {
  uptr size = thr->shadow_stack_pos - thr->shadow_stack;
  uptr start = 0;
  // If the stack plus the optional top PC exceeds capacity, drop the
  // outermost frames (!!toppc is 1 iff a top PC is supplied).
  if (size + !!toppc > kStackTraceMax) {
    start = size + !!toppc - kStackTraceMax;
    size = kStackTraceMax - !!toppc;
  }
  stack->Init(&thr->shadow_stack[start], size, toppc);
  ExtractTagFromStack(stack, tag);
}
4760b57cec5SDimitry Andric 
// Declares a local VarSizeStackTrace named `stack`, fills it with the
// current stack via ObtainCurrentStack, and reverses it (for fatal reports).
#define GET_STACK_TRACE_FATAL(thr, pc) \
  VarSizeStackTrace stack; \
  ObtainCurrentStack(thr, pc, &stack); \
  stack.ReverseOrder();
4810b57cec5SDimitry Andric 
4820b57cec5SDimitry Andric void MapShadow(uptr addr, uptr size);
4830b57cec5SDimitry Andric void MapThreadTrace(uptr addr, uptr size, const char *name);
4840b57cec5SDimitry Andric void DontNeedShadowFor(uptr addr, uptr size);
48568d75effSDimitry Andric void UnmapShadow(ThreadState *thr, uptr addr, uptr size);
4860b57cec5SDimitry Andric void InitializeShadowMemory();
487bdd1243dSDimitry Andric void DontDumpShadow(uptr addr, uptr size);
4880b57cec5SDimitry Andric void InitializeInterceptors();
4890b57cec5SDimitry Andric void InitializeLibIgnore();
4900b57cec5SDimitry Andric void InitializeDynamicAnnotations();
4910b57cec5SDimitry Andric 
4920b57cec5SDimitry Andric void ForkBefore(ThreadState *thr, uptr pc);
4930b57cec5SDimitry Andric void ForkParentAfter(ThreadState *thr, uptr pc);
494349cc55cSDimitry Andric void ForkChildAfter(ThreadState *thr, uptr pc, bool start_thread);
4950b57cec5SDimitry Andric 
4960eae32dcSDimitry Andric void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
4970eae32dcSDimitry Andric                 AccessType typ);
4980b57cec5SDimitry Andric bool OutputReport(ThreadState *thr, const ScopedReport &srep);
4990b57cec5SDimitry Andric bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
5000b57cec5SDimitry Andric bool IsExpectedReport(uptr addr, uptr size);
5010b57cec5SDimitry Andric 
5020b57cec5SDimitry Andric #if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
5030b57cec5SDimitry Andric # define DPrintf Printf
5040b57cec5SDimitry Andric #else
5050b57cec5SDimitry Andric # define DPrintf(...)
5060b57cec5SDimitry Andric #endif
5070b57cec5SDimitry Andric 
5080b57cec5SDimitry Andric #if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
5090b57cec5SDimitry Andric # define DPrintf2 Printf
5100b57cec5SDimitry Andric #else
5110b57cec5SDimitry Andric # define DPrintf2(...)
5120b57cec5SDimitry Andric #endif
5130b57cec5SDimitry Andric 
514349cc55cSDimitry Andric StackID CurrentStackId(ThreadState *thr, uptr pc);
515349cc55cSDimitry Andric ReportStack *SymbolizeStackId(StackID stack_id);
5160b57cec5SDimitry Andric void PrintCurrentStack(ThreadState *thr, uptr pc);
5170b57cec5SDimitry Andric void PrintCurrentStackSlow(uptr pc);  // uses libunwind
518349cc55cSDimitry Andric MBlock *JavaHeapBlock(uptr addr, uptr *start);
5190b57cec5SDimitry Andric 
5200b57cec5SDimitry Andric void Initialize(ThreadState *thr);
5210b57cec5SDimitry Andric void MaybeSpawnBackgroundThread();
5220b57cec5SDimitry Andric int Finalize(ThreadState *thr);
5230b57cec5SDimitry Andric 
5240b57cec5SDimitry Andric void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
5250b57cec5SDimitry Andric void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);
5260b57cec5SDimitry Andric 
5270eae32dcSDimitry Andric void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
5280eae32dcSDimitry Andric                   AccessType typ);
529349cc55cSDimitry Andric void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
530349cc55cSDimitry Andric                            AccessType typ);
5310eae32dcSDimitry Andric // This creates 2 non-inlined specialized versions of MemoryAccessRange.
5320eae32dcSDimitry Andric template <bool is_read>
5330eae32dcSDimitry Andric void MemoryAccessRangeT(ThreadState *thr, uptr pc, uptr addr, uptr size);
5340b57cec5SDimitry Andric 
535349cc55cSDimitry Andric ALWAYS_INLINE
5360eae32dcSDimitry Andric void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
5370eae32dcSDimitry Andric                        bool is_write) {
5380eae32dcSDimitry Andric   if (size == 0)
5390eae32dcSDimitry Andric     return;
5400eae32dcSDimitry Andric   if (is_write)
5410eae32dcSDimitry Andric     MemoryAccessRangeT<false>(thr, pc, addr, size);
5420eae32dcSDimitry Andric   else
5430eae32dcSDimitry Andric     MemoryAccessRangeT<true>(thr, pc, addr, size);
5440b57cec5SDimitry Andric }
5450b57cec5SDimitry Andric 
// Shadow memory manipulation helpers.
void ShadowSet(RawShadow *p, RawShadow *end, RawShadow v);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
                                         uptr size);

// Race/synchronization ignore regions (nestable begin/end pairs).
void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreEnd(ThreadState *thr);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncEnd(ThreadState *thr);

// Thread life cycle events.
Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
                 ThreadType thread_type);
void ThreadFinish(ThreadState *thr);
Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, Tid tid);
void ThreadDetach(ThreadState *thr, uptr pc, Tid tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignalsImpl(ThreadState *thr);
void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid);

// Processor (physical-thread context) management.
Processor *ProcCreate();
void ProcDestroy(Processor *proc);
void ProcWire(Processor *proc, ThreadState *thr);
void ProcUnwire(Processor *proc, ThreadState *thr);

// Mutex instrumentation events.
// Note: the parameter is called flagz, because flags is already taken
// by the global function that returns flags.
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
    int rec = 1);
int  MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);

// Happens-before synchronization primitives.
void Acquire(ThreadState *thr, uptr pc, uptr addr);
// AcquireGlobal synchronizes the current thread with all other threads.
// In terms of happens-before relation, it draws a HB edge from all threads
// (where they happen to execute right now) to the current thread. We use it to
// handle Go finalizers. Namely, finalizer goroutine executes AcquireGlobal
// right before executing finalizers. This provides a coarse, but simple
// approximation of the actual required synchronization.
void AcquireGlobal(ThreadState *thr);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void IncrementEpoch(ThreadState *thr);
6040b57cec5SDimitry Andric 
6050b57cec5SDimitry Andric #if !SANITIZER_GO
6060b57cec5SDimitry Andric uptr ALWAYS_INLINE HeapEnd() {
6070b57cec5SDimitry Andric   return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
6080b57cec5SDimitry Andric }
6090b57cec5SDimitry Andric #endif
6100b57cec5SDimitry Andric 
// Thread slot management (see SlotLocker/SlotUnlocker RAII helpers below).
void SlotAttachAndLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
void SlotDetach(ThreadState *thr);
void SlotLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
void SlotUnlock(ThreadState *thr) SANITIZER_RELEASE(thr->slot->mtx);
void DoReset(ThreadState *thr, uptr epoch);
void FlushShadowMemory();

// Fiber support.
ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags);
void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber);
void FiberSwitch(ThreadState *thr, uptr pc, ThreadState *fiber, unsigned flags);

// These need to match __tsan_switch_to_fiber_* flags defined in
// tsan_interface.h. See documentation there as well.
enum FiberSwitchFlags {
  FiberSwitchFlagNoSync = 1 << 0, // __tsan_switch_to_fiber_no_sync
};
6270b57cec5SDimitry Andric 
6280eae32dcSDimitry Andric class SlotLocker {
6290eae32dcSDimitry Andric  public:
6300eae32dcSDimitry Andric   ALWAYS_INLINE
6310eae32dcSDimitry Andric   SlotLocker(ThreadState *thr, bool recursive = false)
6320eae32dcSDimitry Andric       : thr_(thr), locked_(recursive ? thr->slot_locked : false) {
633bdd1243dSDimitry Andric #if !SANITIZER_GO
634bdd1243dSDimitry Andric     // We are in trouble if we are here with in_blocking_func set.
635bdd1243dSDimitry Andric     // If in_blocking_func is set, all signals will be delivered synchronously,
636bdd1243dSDimitry Andric     // which means we can't lock slots since the signal handler will try
637bdd1243dSDimitry Andric     // to lock it recursively and deadlock.
638bdd1243dSDimitry Andric     DCHECK(!atomic_load(&thr->in_blocking_func, memory_order_relaxed));
639bdd1243dSDimitry Andric #endif
6400eae32dcSDimitry Andric     if (!locked_)
6410eae32dcSDimitry Andric       SlotLock(thr_);
6420eae32dcSDimitry Andric   }
6430eae32dcSDimitry Andric 
6440eae32dcSDimitry Andric   ALWAYS_INLINE
6450eae32dcSDimitry Andric   ~SlotLocker() {
6460eae32dcSDimitry Andric     if (!locked_)
6470eae32dcSDimitry Andric       SlotUnlock(thr_);
6480eae32dcSDimitry Andric   }
6490eae32dcSDimitry Andric 
6500eae32dcSDimitry Andric  private:
6510eae32dcSDimitry Andric   ThreadState *thr_;
6520eae32dcSDimitry Andric   bool locked_;
6530eae32dcSDimitry Andric };
6540eae32dcSDimitry Andric 
6550eae32dcSDimitry Andric class SlotUnlocker {
6560eae32dcSDimitry Andric  public:
6570eae32dcSDimitry Andric   SlotUnlocker(ThreadState *thr) : thr_(thr), locked_(thr->slot_locked) {
6580eae32dcSDimitry Andric     if (locked_)
6590eae32dcSDimitry Andric       SlotUnlock(thr_);
6600eae32dcSDimitry Andric   }
6610eae32dcSDimitry Andric 
6620eae32dcSDimitry Andric   ~SlotUnlocker() {
6630eae32dcSDimitry Andric     if (locked_)
6640eae32dcSDimitry Andric       SlotLock(thr_);
6650eae32dcSDimitry Andric   }
6660eae32dcSDimitry Andric 
6670eae32dcSDimitry Andric  private:
6680eae32dcSDimitry Andric   ThreadState *thr_;
6690eae32dcSDimitry Andric   bool locked_;
6700eae32dcSDimitry Andric };
6710eae32dcSDimitry Andric 
672349cc55cSDimitry Andric ALWAYS_INLINE void ProcessPendingSignals(ThreadState *thr) {
673349cc55cSDimitry Andric   if (UNLIKELY(atomic_load_relaxed(&thr->pending_signals)))
674349cc55cSDimitry Andric     ProcessPendingSignalsImpl(thr);
675349cc55cSDimitry Andric }
676349cc55cSDimitry Andric 
// NOTE(review): presumably set by Initialize() once the runtime is up --
// confirm in tsan_rtl.cpp.
extern bool is_initialized;

ALWAYS_INLINE
void LazyInitialize(ThreadState *thr) {
  // If we can use .preinit_array, assume that __tsan_init
  // called from .preinit_array initializes runtime before
  // any instrumented code except when tsan is used as a
  // shared library.
#if (!SANITIZER_CAN_USE_PREINIT_ARRAY || defined(SANITIZER_SHARED))
  if (UNLIKELY(!is_initialized))
    Initialize(thr);
#endif
}
690349cc55cSDimitry Andric 
// Trace part switching: TraceSwitchPart is called when the current trace
// part is full (see TraceAcquire/TraceEvent below).
void TraceResetForTesting();
void TraceSwitchPart(ThreadState *thr);
void TraceSwitchPartImpl(ThreadState *thr);
// NOTE(review): appears to reconstruct the stack trace and mutex set for a
// previously traced event during reporting -- confirm against the definition.
bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
                  AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
                  MutexSet *pmset, uptr *ptag);
697349cc55cSDimitry Andric 
// Reserves space for one EventT in the thread's trace. On success stores
// the destination slot into *ev and returns true; the caller must fill the
// event and then call TraceRelease to publish the new trace position.
// Returns false when the trace position reaches (or appears to reach) the
// end of the current part; the caller must call TraceSwitchPart and retry.
template <typename EventT>
ALWAYS_INLINE WARN_UNUSED_RESULT bool TraceAcquire(ThreadState *thr,
                                                   EventT **ev) {
  // TraceSwitchPart accesses shadow_stack, but it's called infrequently,
  // so we check it here proactively.
  DCHECK(thr->shadow_stack);
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
#if SANITIZER_DEBUG
  // TraceSwitch acquires these mutexes,
  // so we lock them here to detect deadlocks more reliably.
  { Lock lock(&ctx->slot_mtx); }
  { Lock lock(&thr->tctx->trace.mtx); }
  TracePart *current = thr->tctx->trace.parts.Back();
  if (current) {
    DCHECK_GE(pos, &current->events[0]);
    DCHECK_LE(pos, &current->events[TracePart::kSize]);
  } else {
    DCHECK_EQ(pos, nullptr);
  }
#endif
  // TracePart is allocated with mmap and is at least 4K aligned.
  // So the following check is a faster way to check for part end.
  // It may have false positives in the middle of the trace,
  // they are filtered out in TraceSwitch.
  if (UNLIKELY(((uptr)(pos + 1) & TracePart::kAlignment) == 0))
    return false;
  *ev = reinterpret_cast<EventT *>(pos);
  return true;
}
727349cc55cSDimitry Andric 
728349cc55cSDimitry Andric template <typename EventT>
729349cc55cSDimitry Andric ALWAYS_INLINE void TraceRelease(ThreadState *thr, EventT *evp) {
730349cc55cSDimitry Andric   DCHECK_LE(evp + 1, &thr->tctx->trace.parts.Back()->events[TracePart::kSize]);
731349cc55cSDimitry Andric   atomic_store_relaxed(&thr->trace_pos, (uptr)(evp + 1));
732349cc55cSDimitry Andric }
733349cc55cSDimitry Andric 
734349cc55cSDimitry Andric template <typename EventT>
735349cc55cSDimitry Andric void TraceEvent(ThreadState *thr, EventT ev) {
736349cc55cSDimitry Andric   EventT *evp;
737349cc55cSDimitry Andric   if (!TraceAcquire(thr, &evp)) {
738349cc55cSDimitry Andric     TraceSwitchPart(thr);
739349cc55cSDimitry Andric     UNUSED bool res = TraceAcquire(thr, &evp);
740349cc55cSDimitry Andric     DCHECK(res);
741349cc55cSDimitry Andric   }
742349cc55cSDimitry Andric   *evp = ev;
743349cc55cSDimitry Andric   TraceRelease(thr, evp);
744349cc55cSDimitry Andric }
745349cc55cSDimitry Andric 
746349cc55cSDimitry Andric ALWAYS_INLINE WARN_UNUSED_RESULT bool TryTraceFunc(ThreadState *thr,
747349cc55cSDimitry Andric                                                    uptr pc = 0) {
748349cc55cSDimitry Andric   if (!kCollectHistory)
749349cc55cSDimitry Andric     return true;
750349cc55cSDimitry Andric   EventFunc *ev;
751349cc55cSDimitry Andric   if (UNLIKELY(!TraceAcquire(thr, &ev)))
752349cc55cSDimitry Andric     return false;
753349cc55cSDimitry Andric   ev->is_access = 0;
754349cc55cSDimitry Andric   ev->is_func = 1;
755349cc55cSDimitry Andric   ev->pc = pc;
756349cc55cSDimitry Andric   TraceRelease(thr, ev);
757349cc55cSDimitry Andric   return true;
758349cc55cSDimitry Andric }
759349cc55cSDimitry Andric 
// Try* variants return false when the current trace part is full and the
// caller must switch parts and retry (see TraceSwitchPart).
WARN_UNUSED_RESULT
bool TryTraceMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
                          AccessType typ);
WARN_UNUSED_RESULT
bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
                               AccessType typ);
void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
                            AccessType typ);
void TraceFunc(ThreadState *thr, uptr pc = 0);
void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
                    StackID stk);
void TraceMutexUnlock(ThreadState *thr, uptr addr);
void TraceTime(ThreadState *thr);

// Non-inlined slow paths for FuncExit/FuncEntry, taken when the current
// trace part fills up mid-event.
void TraceRestartFuncExit(ThreadState *thr);
void TraceRestartFuncEntry(ThreadState *thr, uptr pc);

// Grows the shadow call stack; used in the Go build where the shadow stack
// is dynamically sized (see FuncEntry).
void GrowShadowStack(ThreadState *thr);
778349cc55cSDimitry Andric 
// Instrumented function entry: traces the entry event and pushes pc onto
// the shadow call stack.
ALWAYS_INLINE
void FuncEntry(ThreadState *thr, uptr pc) {
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.sid(), (void *)pc);
  // Trace part full: take the non-inlined restart path instead.
  if (UNLIKELY(!TryTraceFunc(thr, pc)))
    return TraceRestartFuncEntry(thr, pc);
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  // In the C/C++ build the shadow stack has a fixed maximum size.
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  // In the Go build the shadow stack grows on demand.
  if (thr->shadow_stack_pos == thr->shadow_stack_end)
    GrowShadowStack(thr);
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}
794349cc55cSDimitry Andric 
// Instrumented function exit: traces the exit event (pc == 0) and pops the
// shadow call stack.
ALWAYS_INLINE
void FuncExit(ThreadState *thr) {
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.sid());
  // Trace part full: take the non-inlined restart path instead.
  if (UNLIKELY(!TryTraceFunc(thr, 0)))
    return TraceRestartFuncExit(thr);
  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  thr->shadow_stack_pos--;
}
806349cc55cSDimitry Andric 
#if !SANITIZER_GO
// Optional hook function pointers. NOTE(review): presumably invoked by
// Initialize()/Finalize() when set -- confirm in tsan_rtl.cpp.
extern void (*on_initialize)(void);
extern int (*on_finalize)(int);
#endif
8110b57cec5SDimitry Andric }  // namespace __tsan
8120b57cec5SDimitry Andric 
8130b57cec5SDimitry Andric #endif  // TSAN_RTL_H
814