//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main internal TSan header file.
//
// Ground rules:
//   - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
//     function-scope locals)
//   - All functions/classes/etc reside in namespace __tsan, except for those
//     declared in tsan_interface.h.
//   - Platform-specific files should be used instead of ifdefs (*).
//   - No system headers included in header files (*).
//   - Platform-specific headers are included only in platform-specific files (*).
//
//  (*) Except when inlining is critical for performance.
//===----------------------------------------------------------------------===//

#ifndef TSAN_RTL_H
#define TSAN_RTL_H

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_asm.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
#include "sanitizer_common/sanitizer_libignore.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_vector.h"
#include "tsan_defs.h"
#include "tsan_flags.h"
#include "tsan_ignoreset.h"
#include "tsan_ilist.h"
#include "tsan_mman.h"
#include "tsan_mutexset.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_shadow.h"
#include "tsan_stack_trace.h"
#include "tsan_sync.h"
#include "tsan_trace.h"
#include "tsan_vector_clock.h"

#if SANITIZER_WORDSIZE != 64
# error "ThreadSanitizer is supported only on 64-bit platforms"
#endif

namespace __tsan {

#if !SANITIZER_GO
struct MapUnmapCallback;
#if defined(__mips64) || defined(__aarch64__) || defined(__loongarch__) || \
    defined(__powerpc__)

struct AP32 {
  static const uptr kSpaceBeg = 0;
  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
  static const uptr kMetadataSize = 0;
  typedef __sanitizer::CompactSizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = 20;
  using AddressSpaceView = LocalAddressSpaceView;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
};
typedef SizeClassAllocator32<AP32> PrimaryAllocator;
#else
struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
#  if defined(__s390x__)
  typedef MappingS390x Mapping;
#  else
  typedef Mapping48AddressSpace Mapping;
#  endif
  static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
  static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
  static const uptr kMetadataSize = 0;
  typedef DefaultSizeClassMap SizeClassMap;
  typedef __tsan::MapUnmapCallback MapUnmapCallback;
  static const uptr kFlags = 0;
  using AddressSpaceView = LocalAddressSpaceView;
};
typedef SizeClassAllocator64<AP64> PrimaryAllocator;
#endif
typedef CombinedAllocator<PrimaryAllocator> Allocator;
typedef Allocator::AllocatorCache AllocatorCache;
Allocator *allocator();
#endif

struct ThreadSignalContext;

struct JmpBuf {
  uptr sp;
  int int_signal_send;
  bool in_blocking_func;
  uptr in_signal_handler;
  uptr *shadow_stack_pos;
};

// A Processor represents a physical thread, or a P for Go.
// It is used to store internal resources such as allocator caches, and does
// not participate in race-detection logic (it is invisible to the end user).
// In C++ it is tied to an OS thread just like ThreadState; ideally, however,
// it would be tied to a CPU (this way we would have fewer allocator caches).
// In Go it is tied to a P, so there are significantly fewer Processors than
// ThreadStates (which are tied to Gs).
// A ThreadState must be wired with a Processor to handle events.
struct Processor {
  ThreadState *thr;  // currently wired thread, or nullptr
#if !SANITIZER_GO
  AllocatorCache alloc_cache;
  InternalAllocatorCache internal_alloc_cache;
#endif
  DenseSlabAllocCache block_cache;
  DenseSlabAllocCache sync_cache;
  DDPhysicalThread *dd_pt;
};

#if !SANITIZER_GO
// ScopedGlobalProcessor temporarily sets up a global processor for the current
// thread, if it does not have one. Intended for interceptors that can run at
// the very end of a thread, when the thread's own processor has already been
// destroyed.
struct ScopedGlobalProcessor {
  ScopedGlobalProcessor();
  ~ScopedGlobalProcessor();
};
#endif
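
// A minimal usage sketch (illustrative only; the hook name below is
// hypothetical): an interceptor that may fire after the thread's Processor
// was already destroyed can construct the guard on the stack so that
// thr->proc() remains usable for the duration of the call:
//
//   void LateFreeHook(ThreadState *thr, uptr pc, void *p) {
//     ScopedGlobalProcessor sgp;  // wires the global Processor if needed
//     // ... events that require thr->proc() can be handled here ...
//   }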

struct TidEpoch {
  Tid tid;
  Epoch epoch;
};

struct TidSlot {
  Mutex mtx;
  Sid sid;
  atomic_uint32_t raw_epoch;
  ThreadState *thr;
  Vector<TidEpoch> journal;
  INode node;

  Epoch epoch() const {
    return static_cast<Epoch>(atomic_load(&raw_epoch, memory_order_relaxed));
  }

  void SetEpoch(Epoch v) {
    atomic_store(&raw_epoch, static_cast<u32>(v), memory_order_relaxed);
  }

  TidSlot();
} ALIGNED(SANITIZER_CACHE_LINE_SIZE);

// This struct is stored in TLS.
struct ThreadState {
  FastState fast_state;
  int ignore_sync;
#if !SANITIZER_GO
  int ignore_interceptors;
#endif
  uptr *shadow_stack_pos;

  // Current position in tctx->trace.Back()->events (Event*).
  atomic_uintptr_t trace_pos;
  // PC of the last memory access, used to compute PC deltas in the trace.
  uptr trace_prev_pc;

  // Technically `current` should be a separate THREADLOCAL variable;
  // but it is placed here in order to share a cache line with the previous
  // fields.
  ThreadState* current;

  atomic_sint32_t pending_signals;

  VectorClock clock;

  // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
  // We do not distinguish between ignoring reads and writes
  // for better performance.
  int ignore_reads_and_writes;
  int suppress_reports;
  // Go does not support ignores.
#if !SANITIZER_GO
  IgnoreSet mop_ignore_set;
  IgnoreSet sync_ignore_set;
#endif
  uptr *shadow_stack;
  uptr *shadow_stack_end;
#if !SANITIZER_GO
  Vector<JmpBuf> jmp_bufs;
  int in_symbolizer;
  atomic_uintptr_t in_blocking_func;
  bool in_ignored_lib;
  bool is_inited;
#endif
  MutexSet mset;
  bool is_dead;
  const Tid tid;
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
  ThreadContext *tctx;

  DDLogicalThread *dd_lt;

  TidSlot *slot;
  uptr slot_epoch;
  bool slot_locked;

  // Current wired Processor, or nullptr. Required to handle any events.
  Processor *proc1;
#if !SANITIZER_GO
  Processor *proc() { return proc1; }
#else
  Processor *proc();
#endif

  atomic_uintptr_t in_signal_handler;
  atomic_uintptr_t signal_ctx;

#if !SANITIZER_GO
  StackID last_sleep_stack_id;
  VectorClock last_sleep_clock;
#endif

  // Set in regions of runtime that must be signal-safe and fork-safe.
  // If set, malloc must not be called.
  int nomalloc;

  const ReportDesc *current_report;

  explicit ThreadState(Tid tid);
} ALIGNED(SANITIZER_CACHE_LINE_SIZE);

#if !SANITIZER_GO
#if SANITIZER_APPLE || SANITIZER_ANDROID
ThreadState *cur_thread();
void set_cur_thread(ThreadState *thr);
void cur_thread_finalize();
inline ThreadState *cur_thread_init() { return cur_thread(); }
#  else
__attribute__((tls_model("initial-exec")))
extern THREADLOCAL char cur_thread_placeholder[];
inline ThreadState *cur_thread() {
  return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
}
inline ThreadState *cur_thread_init() {
  ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
  if (UNLIKELY(!thr->current))
    thr->current = thr;
  return thr->current;
}
inline void set_cur_thread(ThreadState *thr) {
  reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
}
inline void cur_thread_finalize() { }
#  endif  // SANITIZER_APPLE || SANITIZER_ANDROID
#endif  // SANITIZER_GO

class ThreadContext final : public ThreadContextBase {
 public:
  explicit ThreadContext(Tid tid);
  ~ThreadContext();
  ThreadState *thr;
  StackID creation_stack_id;
  VectorClock *sync;
  uptr sync_epoch;
  Trace trace;

  // Override superclass callbacks.
  void OnDead() override;
  void OnJoined(void *arg) override;
  void OnFinished() override;
  void OnStarted(void *arg) override;
  void OnCreated(void *arg) override;
  void OnReset() override;
  void OnDetached(void *arg) override;
};

struct RacyStacks {
  MD5Hash hash[2];
  bool operator==(const RacyStacks &other) const;
};

struct RacyAddress {
  uptr addr_min;
  uptr addr_max;
};

struct FiredSuppression {
  ReportType type;
  uptr pc_or_addr;
  Suppression *supp;
};

struct Context {
  Context();

  bool initialized;
#if !SANITIZER_GO
  bool after_multithreaded_fork;
#endif

  MetaMap metamap;

  Mutex report_mtx;
  int nreported;
  atomic_uint64_t last_symbolize_time_ns;

  void *background_thread;
  atomic_uint32_t stop_background_thread;

  ThreadRegistry thread_registry;

  // This is used to prevent a very unlikely but very pathological behavior.
  // Since memory access handling is not synchronized with DoReset,
  // a thread running concurrently with DoReset can leave a bogus shadow value
  // that will be later falsely detected as a race. For such false races
  // RestoreStack will return false and we will not report it.
  // However, consider that a thread leaves a whole lot of such bogus values
  // and these values are later read by a whole lot of threads.
  // This will cause massive amounts of ReportRace calls and lots of
  // serialization. In very pathological cases the resulting slowdown
  // can be >100x. This is very unlikely, but it was presumably observed
  // in practice: https://github.com/google/sanitizers/issues/1552
  // If this happens, the previous access sid+epoch will be the same for all of
  // these false races because if the thread tries to increment the epoch, it
  // will notice that DoReset has happened and will stop producing bogus shadow
  // values. So, last_spurious_race is used to remember the last sid+epoch
  // for which RestoreStack returned false. Then it is used to filter out
  // races with the same sid+epoch very early and quickly.
  // It is of course possible that multiple threads left multiple bogus shadow
  // values and all of them are read by lots of threads at the same time.
  // In such a case last_spurious_race will only be able to deduplicate a few
  // races from one thread, then a few from another and so on. An alternative
  // would be to hold an array of such sid+epoch pairs, but we consider that
  // scenario to be even less likely.
  // Note: this can lead to some rare false negatives as well:
  // 1. When a legit access with the same sid+epoch participates in a race
  // as the "previous" memory access, it will be wrongly filtered out.
  // 2. When RestoreStack returns false for a legit memory access because it
  // was already evicted from the thread trace, we will still remember it in
  // last_spurious_race. Then if there is another racing memory access from
  // the same thread that happened in the same epoch, but was stored in the
  // next thread trace part (which is still preserved in the thread trace),
  // we will also wrongly filter it out while RestoreStack would actually
  // succeed for that second memory access.
  RawShadow last_spurious_race;

  Mutex racy_mtx;
  Vector<RacyStacks> racy_stacks;
  // The number of fired suppressions may be large.
  Mutex fired_suppressions_mtx;
  InternalMmapVector<FiredSuppression> fired_suppressions;
  DDetector *dd;

  Flags flags;
  fd_t memprof_fd;

  // The last slot index (kFreeSid) is used to denote freed memory.
  TidSlot slots[kThreadSlotCount - 1];

  // Protects global_epoch, slot_queue, trace_part_recycle.
  Mutex slot_mtx;
  uptr global_epoch;  // guarded by slot_mtx and by all slot mutexes
  bool resetting;     // global reset is in progress
  IList<TidSlot, &TidSlot::node> slot_queue SANITIZER_GUARDED_BY(slot_mtx);
  IList<TraceHeader, &TraceHeader::global, TracePart> trace_part_recycle
      SANITIZER_GUARDED_BY(slot_mtx);
  uptr trace_part_total_allocated SANITIZER_GUARDED_BY(slot_mtx);
  uptr trace_part_recycle_finished SANITIZER_GUARDED_BY(slot_mtx);
  uptr trace_part_finished_excess SANITIZER_GUARDED_BY(slot_mtx);
#if SANITIZER_GO
  uptr mapped_shadow_begin;
  uptr mapped_shadow_end;
#endif
};

extern Context *ctx;  // The one and the only global runtime context.

ALWAYS_INLINE Flags *flags() {
  return &ctx->flags;
}

struct ScopedIgnoreInterceptors {
  ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
    cur_thread()->ignore_interceptors++;
#endif
  }

  ~ScopedIgnoreInterceptors() {
#if !SANITIZER_GO
    cur_thread()->ignore_interceptors--;
#endif
  }
};
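
// A minimal usage sketch (illustrative only; the callee name is hypothetical):
// runtime code that calls out to external libraries and must not record the
// resulting intercepted calls holds the guard for the duration of the call:
//
//   {
//     ScopedIgnoreInterceptors ignore;  // interceptors skip processing
//     ExternalSymbolizeOrLog(pc);       // hypothetical external call
//   }
//
// ScopedReportBase below holds one of these for exactly this reason, so that
// symbolization does not deadlock on internal mutexes.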

const char *GetObjectTypeFromTag(uptr tag);
const char *GetReportHeaderFromTag(uptr tag);
uptr TagFromShadowStackFrame(uptr pc);

class ScopedReportBase {
 public:
  void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, Tid tid,
                       StackTrace stack, const MutexSet *mset);
  void AddStack(StackTrace stack, bool suppressable = false);
  void AddThread(const ThreadContext *tctx, bool suppressable = false);
  void AddThread(Tid tid, bool suppressable = false);
  void AddUniqueTid(Tid unique_tid);
  int AddMutex(uptr addr, StackID creation_stack_id);
  void AddLocation(uptr addr, uptr size);
  void AddSleep(StackID stack_id);
  void SetCount(int count);
  void SetSigNum(int sig);

  const ReportDesc *GetReport() const;

 protected:
  ScopedReportBase(ReportType typ, uptr tag);
  ~ScopedReportBase();

 private:
  ReportDesc *rep_;
  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore_interceptors_;

  ScopedReportBase(const ScopedReportBase &) = delete;
  void operator=(const ScopedReportBase &) = delete;
};

class ScopedReport : public ScopedReportBase {
 public:
  explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone);
  ~ScopedReport();

 private:
  ScopedErrorReportLock lock_;
};

bool ShouldReport(ThreadState *thr, ReportType typ);
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);

// The stack could look like:
//   <start> | <main> | <foo> | tag | <bar>
// This will extract the tag and keep:
//   <start> | <main> | <foo> | <bar>
template <typename StackTraceTy>
void ExtractTagFromStack(StackTraceTy *stack, uptr *tag = nullptr) {
  if (stack->size < 2) return;
  uptr possible_tag_pc = stack->trace[stack->size - 2];
  uptr possible_tag = TagFromShadowStackFrame(possible_tag_pc);
  if (possible_tag == kExternalTagNone) return;
  stack->trace_buffer[stack->size - 2] = stack->trace_buffer[stack->size - 1];
  stack->size -= 1;
  if (tag) *tag = possible_tag;
}

template <typename StackTraceTy>
void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
                        uptr *tag = nullptr) {
  uptr size = thr->shadow_stack_pos - thr->shadow_stack;
  uptr start = 0;
  if (size + !!toppc > kStackTraceMax) {
    start = size + !!toppc - kStackTraceMax;
    size = kStackTraceMax - !!toppc;
  }
  stack->Init(&thr->shadow_stack[start], size, toppc);
  ExtractTagFromStack(stack, tag);
}

#define GET_STACK_TRACE_FATAL(thr, pc) \
  VarSizeStackTrace stack;             \
  ObtainCurrentStack(thr, pc, &stack); \
  stack.ReverseOrder();

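// A usage sketch (illustrative only; ReportSomething is a hypothetical
// helper): report paths typically capture the current stack with the macro
// above and attach it to a ScopedReport, roughly:
//
//   void ReportSomething(ThreadState *thr, uptr pc) {
//     ScopedReport rep(ReportTypeRace);
//     GET_STACK_TRACE_FATAL(thr, pc);  // declares and fills `stack`
//     rep.AddStack(stack, /*suppressable=*/true);
//     OutputReport(thr, rep);
//   }
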
void MapShadow(uptr addr, uptr size);
void MapThreadTrace(uptr addr, uptr size, const char *name);
void DontNeedShadowFor(uptr addr, uptr size);
void UnmapShadow(ThreadState *thr, uptr addr, uptr size);
void InitializeShadowMemory();
void DontDumpShadow(uptr addr, uptr size);
void InitializeInterceptors();
void InitializeLibIgnore();
void InitializeDynamicAnnotations();

void ForkBefore(ThreadState *thr, uptr pc);
void ForkParentAfter(ThreadState *thr, uptr pc);
void ForkChildAfter(ThreadState *thr, uptr pc, bool start_thread);

void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
                AccessType typ);
bool OutputReport(ThreadState *thr, const ScopedReport &srep);
bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
bool IsExpectedReport(uptr addr, uptr size);

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
# define DPrintf Printf
#else
# define DPrintf(...)
#endif

#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
# define DPrintf2 Printf
#else
# define DPrintf2(...)
#endif

StackID CurrentStackId(ThreadState *thr, uptr pc);
ReportStack *SymbolizeStackId(StackID stack_id);
void PrintCurrentStack(ThreadState *thr, uptr pc);
void PrintCurrentStackSlow(uptr pc);  // uses libunwind
MBlock *JavaHeapBlock(uptr addr, uptr *start);

void Initialize(ThreadState *thr);
void MaybeSpawnBackgroundThread();
int Finalize(ThreadState *thr);

void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);

void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
                  AccessType typ);
void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           AccessType typ);
// This creates 2 non-inlined specialized versions of MemoryAccessRange.
template <bool is_read>
void MemoryAccessRangeT(ThreadState *thr, uptr pc, uptr addr, uptr size);

ALWAYS_INLINE
void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
                       bool is_write) {
  if (size == 0)
    return;
  if (is_write)
    MemoryAccessRangeT<false>(thr, pc, addr, size);
  else
    MemoryAccessRangeT<true>(thr, pc, addr, size);
}

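// A usage sketch (illustrative only; OnMemcpy is a hypothetical helper):
// a range operation such as an intercepted memcpy is naturally modeled as a
// read of the source range followed by a write of the destination range:
//
//   void OnMemcpy(ThreadState *thr, uptr pc, void *dst, const void *src,
//                 uptr size) {
//     MemoryAccessRange(thr, pc, (uptr)src, size, /*is_write=*/false);
//     MemoryAccessRange(thr, pc, (uptr)dst, size, /*is_write=*/true);
//   }
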
void ShadowSet(RawShadow *p, RawShadow *end, RawShadow v);
void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
                                         uptr size);

void ThreadIgnoreBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreEnd(ThreadState *thr);
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc);
void ThreadIgnoreSyncEnd(ThreadState *thr);

Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
                 ThreadType thread_type);
void ThreadFinish(ThreadState *thr);
Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid);
void ThreadJoin(ThreadState *thr, uptr pc, Tid tid);
void ThreadDetach(ThreadState *thr, uptr pc, Tid tid);
void ThreadFinalize(ThreadState *thr);
void ThreadSetName(ThreadState *thr, const char *name);
int ThreadCount(ThreadState *thr);
void ProcessPendingSignalsImpl(ThreadState *thr);
void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid);

Processor *ProcCreate();
void ProcDestroy(Processor *proc);
void ProcWire(Processor *proc, ThreadState *thr);
void ProcUnwire(Processor *proc, ThreadState *thr);
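
// A minimal lifecycle sketch (illustrative only), following the Processor
// comment above: a ThreadState must be wired with a Processor before it can
// handle events, and unwired before the Processor is destroyed:
//
//   Processor *proc = ProcCreate();
//   ProcWire(proc, thr);   // thr->proc() is now valid
//   // ... handle events on behalf of thr ...
//   ProcUnwire(proc, thr);
//   ProcDestroy(proc);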

// Note: the parameter is called flagz, because flags is already taken
// by the global function that returns flags.
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
                   int rec = 1);
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
void MutexRepair(ThreadState *thr, uptr pc, uptr addr);  // call on EOWNERDEAD
void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);
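
// A usage sketch (illustrative only): a lock-acquisition interceptor would
// typically bracket the real locking call with the pre/post events, roughly
// (the exact call site and error handling differ in the real interceptors):
//
//   MutexPreLock(thr, pc, (uptr)m);
//   int res = REAL(pthread_mutex_lock)(m);  // REAL() is the usual
//                                           // interception convention
//   if (res == 0)
//     MutexPostLock(thr, pc, (uptr)m);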

void Acquire(ThreadState *thr, uptr pc, uptr addr);
// AcquireGlobal synchronizes the current thread with all other threads.
// In terms of the happens-before relation, it draws an HB edge from all
// threads (wherever they happen to execute right now) to the current thread.
// We use it to handle Go finalizers. Namely, the finalizer goroutine executes
// AcquireGlobal right before executing finalizers. This provides a coarse,
// but simple approximation of the actual required synchronization.
void AcquireGlobal(ThreadState *thr);
void Release(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr);
void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
void AfterSleep(ThreadState *thr, uptr pc);
void IncrementEpoch(ThreadState *thr);
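
// A minimal sketch of the intended use (illustrative only): Release()
// publishes the current thread's vector clock on the sync object identified
// by `addr`, and a later Acquire() on the same address in another thread
// joins that clock, creating the happens-before edge the detector relies on:
//
//   // thread 1, e.g. in an unlock/send interceptor:
//   Release(thr1, pc, addr);
//   // thread 2, e.g. in the matching lock/receive interceptor:
//   Acquire(thr2, pc, addr);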

#if !SANITIZER_GO
uptr ALWAYS_INLINE HeapEnd() {
  return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
}
#endif

void SlotAttachAndLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
void SlotDetach(ThreadState *thr);
void SlotLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
void SlotUnlock(ThreadState *thr) SANITIZER_RELEASE(thr->slot->mtx);
void DoReset(ThreadState *thr, uptr epoch);
void FlushShadowMemory();

ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags);
void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber);
void FiberSwitch(ThreadState *thr, uptr pc, ThreadState *fiber, unsigned flags);

// These need to match __tsan_switch_to_fiber_* flags defined in
// tsan_interface.h. See documentation there as well.
enum FiberSwitchFlags {
  FiberSwitchFlagNoSync = 1 << 0,  // __tsan_switch_to_fiber_no_sync
};

class SlotLocker {
 public:
  ALWAYS_INLINE
  SlotLocker(ThreadState *thr, bool recursive = false)
      : thr_(thr), locked_(recursive ? thr->slot_locked : false) {
#if !SANITIZER_GO
    // We are in trouble if we are here with in_blocking_func set.
    // If in_blocking_func is set, all signals will be delivered synchronously,
    // which means we can't lock slots since the signal handler will try
    // to lock it recursively and deadlock.
    DCHECK(!atomic_load(&thr->in_blocking_func, memory_order_relaxed));
#endif
    if (!locked_)
      SlotLock(thr_);
  }

  ALWAYS_INLINE
  ~SlotLocker() {
    if (!locked_)
      SlotUnlock(thr_);
  }

 private:
  ThreadState *thr_;
  bool locked_;
};

class SlotUnlocker {
 public:
  SlotUnlocker(ThreadState *thr) : thr_(thr), locked_(thr->slot_locked) {
    if (locked_)
      SlotUnlock(thr_);
  }

  ~SlotUnlocker() {
    if (locked_)
      SlotLock(thr_);
  }

 private:
  ThreadState *thr_;
  bool locked_;
};
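
// A usage sketch (illustrative only): runtime operations that touch slot
// state take SlotLocker for their duration; code that is about to block and
// must not hold the slot lock across the blocking call wraps it in
// SlotUnlocker, which re-acquires the lock on scope exit:
//
//   {
//     SlotLocker locker(thr, /*recursive=*/true);
//     // ... mutate state guarded by thr->slot->mtx ...
//     {
//       SlotUnlocker unlocker(thr);
//       // ... potentially blocking work without the slot lock held ...
//     }
//   }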

ALWAYS_INLINE void ProcessPendingSignals(ThreadState *thr) {
  if (UNLIKELY(atomic_load_relaxed(&thr->pending_signals)))
    ProcessPendingSignalsImpl(thr);
}

extern bool is_initialized;

ALWAYS_INLINE
void LazyInitialize(ThreadState *thr) {
  // If we can use .preinit_array, assume that __tsan_init, called from
  // .preinit_array, initializes the runtime before any instrumented code
  // runs, except when TSan is used as a shared library.
#if (!SANITIZER_CAN_USE_PREINIT_ARRAY || defined(SANITIZER_SHARED))
  if (UNLIKELY(!is_initialized))
    Initialize(thr);
#endif
}

void TraceResetForTesting();
void TraceSwitchPart(ThreadState *thr);
void TraceSwitchPartImpl(ThreadState *thr);
bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
                  AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
                  MutexSet *pmset, uptr *ptag);

template <typename EventT>
ALWAYS_INLINE WARN_UNUSED_RESULT bool TraceAcquire(ThreadState *thr,
                                                   EventT **ev) {
  // TraceSwitchPart accesses shadow_stack, but it's called infrequently,
  // so we check it here proactively.
  DCHECK(thr->shadow_stack);
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
#if SANITIZER_DEBUG
  // TraceSwitch acquires these mutexes,
  // so we lock them here to detect deadlocks more reliably.
  { Lock lock(&ctx->slot_mtx); }
  { Lock lock(&thr->tctx->trace.mtx); }
  TracePart *current = thr->tctx->trace.parts.Back();
  if (current) {
    DCHECK_GE(pos, &current->events[0]);
    DCHECK_LE(pos, &current->events[TracePart::kSize]);
  } else {
    DCHECK_EQ(pos, nullptr);
  }
#endif
  // TracePart is allocated with mmap and is at least 4K aligned.
  // So the following check is a faster way to check for part end.
  // It may have false positives in the middle of the trace;
  // they are filtered out in TraceSwitch.
  if (UNLIKELY(((uptr)(pos + 1) & TracePart::kAlignment) == 0))
    return false;
  *ev = reinterpret_cast<EventT *>(pos);
  return true;
}

template <typename EventT>
ALWAYS_INLINE void TraceRelease(ThreadState *thr, EventT *evp) {
  DCHECK_LE(evp + 1, &thr->tctx->trace.parts.Back()->events[TracePart::kSize]);
  atomic_store_relaxed(&thr->trace_pos, (uptr)(evp + 1));
}

template <typename EventT>
void TraceEvent(ThreadState *thr, EventT ev) {
  EventT *evp;
  if (!TraceAcquire(thr, &evp)) {
    TraceSwitchPart(thr);
    UNUSED bool res = TraceAcquire(thr, &evp);
    DCHECK(res);
  }
  *evp = ev;
  TraceRelease(thr, evp);
}

ALWAYS_INLINE WARN_UNUSED_RESULT bool TryTraceFunc(ThreadState *thr,
                                                   uptr pc = 0) {
  if (!kCollectHistory)
    return true;
  EventFunc *ev;
  if (UNLIKELY(!TraceAcquire(thr, &ev)))
    return false;
  ev->is_access = 0;
  ev->is_func = 1;
  ev->pc = pc;
  TraceRelease(thr, ev);
  return true;
}

WARN_UNUSED_RESULT
bool TryTraceMemoryAccess(ThreadState *thr, uptr pc, uptr addr, uptr size,
                          AccessType typ);
WARN_UNUSED_RESULT
bool TryTraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
                               AccessType typ);
void TraceMemoryAccessRange(ThreadState *thr, uptr pc, uptr addr, uptr size,
                            AccessType typ);
void TraceFunc(ThreadState *thr, uptr pc = 0);
void TraceMutexLock(ThreadState *thr, EventType type, uptr pc, uptr addr,
                    StackID stk);
void TraceMutexUnlock(ThreadState *thr, uptr addr);
void TraceTime(ThreadState *thr);

void TraceRestartFuncExit(ThreadState *thr);
void TraceRestartFuncEntry(ThreadState *thr, uptr pc);

void GrowShadowStack(ThreadState *thr);

ALWAYS_INLINE
void FuncEntry(ThreadState *thr, uptr pc) {
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.sid(), (void *)pc);
  if (UNLIKELY(!TryTraceFunc(thr, pc)))
    return TraceRestartFuncEntry(thr, pc);
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end)
    GrowShadowStack(thr);
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE
void FuncExit(ThreadState *thr) {
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.sid());
  if (UNLIKELY(!TryTraceFunc(thr, 0)))
    return TraceRestartFuncExit(thr);
  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  thr->shadow_stack_pos--;
}
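
// A usage sketch (illustrative only): compiler instrumentation is expected to
// call the __tsan_func_entry/__tsan_func_exit interface functions around each
// function body; those entry points forward here, roughly:
//
//   void __tsan_func_entry(void *call_pc) {
//     FuncEntry(cur_thread(), (uptr)call_pc);
//   }
//   void __tsan_func_exit() {
//     FuncExit(cur_thread());
//   }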

#if !SANITIZER_GO
extern void (*on_initialize)(void);
extern int (*on_finalize)(int);
#endif
}  // namespace __tsan

#endif  // TSAN_RTL_H