168d75effSDimitry Andric //===-- tsan_rtl.cpp ------------------------------------------------------===// 268d75effSDimitry Andric // 368d75effSDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 468d75effSDimitry Andric // See https://llvm.org/LICENSE.txt for license information. 568d75effSDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 668d75effSDimitry Andric // 768d75effSDimitry Andric //===----------------------------------------------------------------------===// 868d75effSDimitry Andric // 968d75effSDimitry Andric // This file is a part of ThreadSanitizer (TSan), a race detector. 1068d75effSDimitry Andric // 1168d75effSDimitry Andric // Main file (entry points) for the TSan run-time. 1268d75effSDimitry Andric //===----------------------------------------------------------------------===// 1368d75effSDimitry Andric 14fe6060f1SDimitry Andric #include "tsan_rtl.h" 15fe6060f1SDimitry Andric 1668d75effSDimitry Andric #include "sanitizer_common/sanitizer_atomic.h" 1768d75effSDimitry Andric #include "sanitizer_common/sanitizer_common.h" 1868d75effSDimitry Andric #include "sanitizer_common/sanitizer_file.h" 1968d75effSDimitry Andric #include "sanitizer_common/sanitizer_libc.h" 2068d75effSDimitry Andric #include "sanitizer_common/sanitizer_placement_new.h" 21fe6060f1SDimitry Andric #include "sanitizer_common/sanitizer_stackdepot.h" 2268d75effSDimitry Andric #include "sanitizer_common/sanitizer_symbolizer.h" 2368d75effSDimitry Andric #include "tsan_defs.h" 24fe6060f1SDimitry Andric #include "tsan_interface.h" 2568d75effSDimitry Andric #include "tsan_mman.h" 26fe6060f1SDimitry Andric #include "tsan_platform.h" 2768d75effSDimitry Andric #include "tsan_suppressions.h" 2868d75effSDimitry Andric #include "tsan_symbolize.h" 2968d75effSDimitry Andric #include "ubsan/ubsan_init.h" 3068d75effSDimitry Andric 3168d75effSDimitry Andric volatile int __tsan_resumed = 0; 3268d75effSDimitry Andric 
// Entry point used together with flags()->stop_on_start: the process spins in
// Initialize() until a debugger (or the user) calls __tsan_resume().
extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#if !SANITIZER_GO
// Optional callbacks a front-end can install before initialization; invoked by
// the weak default OnInitialize()/OnFinalize() below.
void (*on_initialize)(void);
int (*on_finalize)(int);
#endif

#if !SANITIZER_GO && !SANITIZER_MAC
// Storage for the current thread's ThreadState; placed in TLS and
// cache-line-aligned (the ThreadState ctor CHECKs this alignment).
__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(
    SANITIZER_CACHE_LINE_SIZE);
#endif
// Storage for the global Context, constructed with placement-new in
// Initialize().
static char ctx_placeholder[sizeof(Context)] ALIGNED(SANITIZER_CACHE_LINE_SIZE);
Context *ctx;

// Can be overriden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
#include <dlfcn.h>
// Weak default: forwards to the dynamically installed on_finalize callback if
// any, otherwise reports the 'failed' status unchanged.
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
#if !SANITIZER_GO
  if (on_finalize)
    return on_finalize(failed);
#endif
  return failed;
}
// Weak default: forwards to the dynamically installed on_initialize callback.
SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {
#if !SANITIZER_GO
  if (on_initialize)
    on_initialize();
#endif
}
#endif

// Factory passed to the thread registry: maps the trace region for 'tid',
// constructs the Trace header in place, and returns a new ThreadContext.
static ThreadContextBase *CreateThreadContext(Tid tid) {
  // Map thread trace when context is created.
  char name[50];
  internal_snprintf(name, sizeof(name), "trace %u", tid);
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
  const uptr hdr = GetThreadTraceHeader(tid);
  internal_snprintf(name, sizeof(name), "trace header %u", tid);
  MapThreadTrace(hdr, sizeof(Trace), name);
  new((void*)hdr) Trace();
  // We are going to use only a small part of the trace with the default
  // value of history_size. However, the constructor writes to the whole trace.
  // Release the unused part.
  uptr hdr_end = hdr + sizeof(Trace);
  hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
  hdr_end = RoundUp(hdr_end, GetPageSizeCached());
  if (hdr_end < hdr + sizeof(Trace)) {
    // Return the tail pages to the OS and re-protect them so stray accesses
    // fault instead of silently re-committing memory.
    ReleaseMemoryPagesToOS(hdr_end, hdr + sizeof(Trace));
    uptr unused = hdr + sizeof(Trace) - hdr_end;
    if (hdr_end != (uptr)MmapFixedNoAccess(hdr_end, unused)) {
      Report("ThreadSanitizer: failed to mprotect [0x%zx-0x%zx) \n", hdr_end,
             unused);
      CHECK("unable to mprotect" && 0);
    }
  }
  return New<ThreadContext>(tid);
}

// Number of finished threads kept around before their slots are reused.
#if !SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
    : initialized(),
      report_mtx(MutexTypeReport),
      nreported(),
      thread_registry(CreateThreadContext, kMaxTid, kThreadQuarantineSize,
                      kMaxTidReuse),
      racy_mtx(MutexTypeRacy),
      racy_stacks(),
      racy_addresses(),
      fired_suppressions_mtx(MutexTypeFired),
      clock_alloc(LINKER_INITIALIZED, "clock allocator") {
  fired_suppressions.reserve(8);
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, Tid tid, int unique_id, u64 epoch,
                         unsigned reuse_count, uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
    : fast_state(tid, epoch)
      // Do not touch these, rely on zero initialization,
      // they may be accessed before the ctor.
      // , ignore_reads_and_writes()
      // , ignore_interceptors()
      ,
      clock(tid, reuse_count)
#if !SANITIZER_GO
      ,
      jmp_bufs()
#endif
      ,
      tid(tid),
      unique_id(unique_id),
      stk_addr(stk_addr),
      stk_size(stk_size),
      tls_addr(tls_addr),
      tls_size(tls_size)
#if !SANITIZER_GO
      ,
      last_sleep_clock(tid)
#endif
{
  CHECK_EQ(reinterpret_cast<uptr>(this) % SANITIZER_CACHE_LINE_SIZE, 0);
#if !SANITIZER_GO
  // C/C++ uses fixed size shadow stack.
  const int kInitStackSize = kShadowStackSize;
  shadow_stack = static_cast<uptr *>(
      MmapNoReserveOrDie(kInitStackSize * sizeof(uptr), "shadow stack"));
  SetShadowRegionHugePageMode(reinterpret_cast<uptr>(shadow_stack),
                              kInitStackSize * sizeof(uptr));
#else
  // Go uses malloc-allocated shadow stack with dynamic size.
  const int kInitStackSize = 8;
  shadow_stack = static_cast<uptr *>(Alloc(kInitStackSize * sizeof(uptr)));
#endif
  shadow_stack_pos = shadow_stack;
  shadow_stack_end = shadow_stack + kInitStackSize;
}

#if !SANITIZER_GO
// Writes one memory-profile snapshot to ctx->memprof_fd (no-op if profiling
// was not enabled by InitializeMemoryProfiler). 'uptime' is nanoseconds since
// the background thread started.
void MemoryProfiler(u64 uptime) {
  if (ctx->memprof_fd == kInvalidFd)
    return;
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), uptime);
  WriteToFile(ctx->memprof_fd, buf.data(), internal_strlen(buf.data()));
}

// Opens the destination for memory profiling per flags()->profile_memory
// ("stdout", "stderr", or a file suffixed with the pid), emits an initial
// snapshot and makes sure the background thread (which emits further
// snapshots) is running.
void InitializeMemoryProfiler() {
  ctx->memprof_fd = kInvalidFd;
  const char *fname = flags()->profile_memory;
  if (!fname || !fname[0])
    return;
  if (internal_strcmp(fname, "stdout") == 0) {
    ctx->memprof_fd = 1;
  } else if (internal_strcmp(fname, "stderr") == 0) {
    ctx->memprof_fd = 2;
  } else {
    InternalScopedString filename;
    filename.append("%s.%d", fname, (int)internal_getpid());
    ctx->memprof_fd = OpenFile(filename.data(), WrOnly);
    if (ctx->memprof_fd == kInvalidFd) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
             filename.data());
      return;
    }
  }
  MemoryProfiler(0);
  MaybeSpawnBackgroundThread();
}

// Periodic housekeeping loop: flushes shadow memory (by timer and/or RSS
// limit), emits memory profiles, and flushes the symbolizer cache, until
// ctx->stop_background_thread is set.
static void *BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread_init()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;
  const u64 start = NanoTime();

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0;
      atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
      i++) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VPrintf(1, "ThreadSanitizer: memory flush check"
                 " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      // Flush when RSS has grown past the midpoint between the previous RSS
      // and the configured limit (2*rss > limit + last_rss).
      if (2 * rss > limit + last_rss) {
        VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
      }
      last_rss = rss;
    }

    MemoryProfiler(now - start);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
  return nullptr;
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef __mips__
// Sandboxing callback: signals the background loop to stop and joins it.
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
#endif

// Returns the shadow pages for [addr, addr+size) to the OS (the mapping
// itself stays intact).
void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(reinterpret_cast<uptr>(MemToShadow(addr)),
                         reinterpret_cast<uptr>(MemToShadow(addr + size)));
}

#if !SANITIZER_GO
// Called when the app unmaps memory: drops the shadow and resets the
// corresponding meta-shadow range.
void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
  if (size == 0) return;
  DontNeedShadowFor(addr, size);
  ScopedGlobalProcessor sgp;
  ctx->metamap.ResetRange(thr->proc(), addr, size);
}
#endif

// Maps shadow and meta-shadow for the application range [addr, addr+size).
// Keeps static state so repeated calls extend the meta mapping monotonically
// without remapping already-covered regions.
void MapShadow(uptr addr, uptr size) {
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                               "shadow"))
    Die();

  // Meta shadow is 2:1, so tread carefully.
  static bool data_mapped = false;
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    if (meta_end <= mapped_meta_end)
      return;
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (0x%zx-0x%zx) at (0x%zx-0x%zx)\n", addr,
          addr + size, meta_begin, meta_end);
}

// Maps a thread's trace region at a fixed address inside
// [TraceMemBeg, TraceMemEnd); dies on failure.
void MapThreadTrace(uptr addr, uptr size, const char *name) {
  DPrintf("#0: Mapping trace at 0x%zx-0x%zx(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, TraceMemBeg());
  CHECK_LE(addr + size, TraceMemEnd());
  CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  if (!MmapFixedSuperNoReserve(addr, size, name)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace (0x%zx/0x%zx)\n",
           addr, size);
    Die();
  }
}

#if !SANITIZER_GO
// Stack-unwind callback for deadly-signal handling.
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

// Deadly-signal handler installed via InstallDeadlySignalHandlers().
static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
#endif

// Callback installed into sanitizer_common for CHECK failures: prints the
// current stack while suppressing further instrumentation work.
void CheckUnwind() {
  // There is high probability that interceptors will check-fail as well,
  // on the other hand there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  cur_thread()->ignore_sync++;
  cur_thread()->ignore_reads_and_writes++;
#endif
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}

bool is_initialized;

// One-time runtime initialization: flags, platform, allocator, interceptors,
// shadow memory, suppressions, and the main thread (tid 0). Idempotent.
void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckUnwindCallback(CheckUnwind);

  ctx = new(ctx_placeholder) Context;
  const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
  const char *options = GetEnv(env_name);
  CacheBinaryName();
  CheckASLR();
  InitializeFlags(&ctx->flags, options, env_name);
  AvoidCVE_2016_2143();
  __sanitizer::InitializePlatformEarly();
  __tsan::InitializePlatformEarly();

#if !SANITIZER_GO
  // Re-exec ourselves if we need to set additional env or command line args.
  MaybeReexec();

  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  InitializePlatform();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  Tid tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, kMainTid);
  ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif
  ctx->initialized = true;

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
  InitializeMemoryProfiler();
#endif

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    // Busy-wait until __tsan_resume() flips the flag.
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}

// Spawns the background housekeeping thread exactly once (guarded by an
// atomic flag); no-op for Go and MIPS builds.
void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we can not spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}


// Process-exit hook: drains pending reports, prints statistics/suppressions,
// runs the OnFinalize hook, and returns the process exit code (flags'
// exitcode if any race was reported, 0 otherwise).
int Finalize(ThreadState *thr) {
  bool failed = false;

  if (common_flags()->print_module_map == 1)
    DumpProcessMap();

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  { ScopedErrorReportLock l; }
  ctx->report_mtx.Unlock();

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();

  failed = OnFinalize(failed);

  return failed ? common_flags()->exitcode : 0;
}

#if !SANITIZER_GO
// pthread_atfork "prepare" hook: acquires the runtime locks so the child
// inherits them in a consistent state. Lock order here must be the exact
// reverse of the unlock order in ForkParentAfter/ForkChildAfter.
void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
  ctx->thread_registry.Lock();
  ctx->report_mtx.Lock();
  ScopedErrorReportLock::Lock();
  // Suppress all reports in the pthread_atfork callbacks.
  // Reports will deadlock on the report_mtx.
  // We could ignore sync operations as well,
  // but so far it's unclear if it will do more good or harm.
  // Unnecessarily ignoring things can lead to false positives later.
  thr->suppress_reports++;
  // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
  // we'll assert in CheckNoLocks() unless we ignore interceptors.
  thr->ignore_interceptors++;
}

// pthread_atfork "parent" hook: undoes everything ForkBefore did, in reverse
// order.
void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
  thr->ignore_interceptors--;
  ScopedErrorReportLock::Unlock();
  ctx->report_mtx.Unlock();
  ctx->thread_registry.Unlock();
}

// pthread_atfork "child" hook: releases the locks taken in ForkBefore, then
// either restarts the background thread (single-threaded fork) or enables
// blanket ignores, since a multi-threaded fork leaves the runtime unusable
// until exec.
void ForkChildAfter(ThreadState *thr, uptr pc,
                    bool start_thread) NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
  thr->ignore_interceptors--;
  ScopedErrorReportLock::Unlock();
  ctx->report_mtx.Unlock();
  ctx->thread_registry.Unlock();

  uptr nthread = 0;
  ctx->thread_registry.GetNumberOfThreads(0, 0, &nthread /* alive threads */);
  VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
      " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    if (start_thread)
      StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif

#if SANITIZER_GO
// Doubles the Go shadow stack (Go builds allocate it dynamically, unlike the
// fixed-size C/C++ shadow stack).
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  auto *newstack = (uptr *)Alloc(newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  Free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

// Captures the current shadow stack (optionally with 'pc' temporarily pushed
// on top) into the stack depot and returns its id.
StackID CurrentStackId(ThreadState *thr, uptr pc) {
  if (!thr->is_inited)  // May happen during bootstrap.
    return kInvalidStackID;
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  StackID id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;  // Pop the temporary pc again.
  return id;
}

namespace v3 {

// Slow path of trace writing: finishes the current trace part (padding the
// tail with NopEvent's), allocates and publishes a new part, and replays the
// current shadow stack and mutex set into it so the part is self-contained.
NOINLINE
void TraceSwitchPart(ThreadState *thr) {
  Trace *trace = &thr->tctx->trace;
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
  DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
  auto *part = trace->parts.Back();
  DPrintf("TraceSwitchPart part=%p pos=%p\n", part, pos);
  if (part) {
    // We can get here when we still have space in the current trace part.
    // The fast-path check in TraceAcquire has false positives in the middle of
    // the part. Check if we are indeed at the end of the current part or not,
    // and fill any gaps with NopEvent's.
    Event *end = &part->events[TracePart::kSize];
    DCHECK_GE(pos, &part->events[0]);
    DCHECK_LE(pos, end);
    if (pos + 1 < end) {
      if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
          TracePart::kAlignment)
        *pos++ = NopEvent;
      *pos++ = NopEvent;
      DCHECK_LE(pos + 2, end);
      atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
      // Ensure we setup trace so that the next TraceAcquire
      // won't detect trace part end.
      Event *ev;
      CHECK(TraceAcquire(thr, &ev));
      return;
    }
    // We are indeed at the end.
    for (; pos < end; pos++) *pos = NopEvent;
  }
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    // We just need to survive till exec.
    CHECK(part);
    atomic_store_relaxed(&thr->trace_pos,
                         reinterpret_cast<uptr>(&part->events[0]));
    return;
  }
#endif
  part = new (MmapOrDie(sizeof(TracePart), "TracePart")) TracePart();
  part->trace = trace;
  thr->trace_prev_pc = 0;
  {
    Lock lock(&trace->mtx);
    trace->parts.PushBack(part);
    atomic_store_relaxed(&thr->trace_pos,
                         reinterpret_cast<uptr>(&part->events[0]));
  }
  // Make this part self-sufficient by restoring the current stack
  // and mutex set in the beginning of the trace.
  TraceTime(thr);
  for (uptr *pos = &thr->shadow_stack[0]; pos < thr->shadow_stack_pos; pos++)
    CHECK(TryTraceFunc(thr, *pos));
  for (uptr i = 0; i < thr->mset.Size(); i++) {
    MutexSet::Desc d = thr->mset.Get(i);
    TraceMutexLock(thr, d.write ?
EventType::kLock : EventType::kRLock, 0, 633*349cc55cSDimitry Andric d.addr, d.stack_id); 634*349cc55cSDimitry Andric } 635*349cc55cSDimitry Andric } 636*349cc55cSDimitry Andric 637*349cc55cSDimitry Andric } // namespace v3 638*349cc55cSDimitry Andric 63968d75effSDimitry Andric void TraceSwitch(ThreadState *thr) { 64068d75effSDimitry Andric #if !SANITIZER_GO 64168d75effSDimitry Andric if (ctx->after_multithreaded_fork) 64268d75effSDimitry Andric return; 64368d75effSDimitry Andric #endif 64468d75effSDimitry Andric thr->nomalloc++; 64568d75effSDimitry Andric Trace *thr_trace = ThreadTrace(thr->tid); 64668d75effSDimitry Andric Lock l(&thr_trace->mtx); 64768d75effSDimitry Andric unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts(); 64868d75effSDimitry Andric TraceHeader *hdr = &thr_trace->headers[trace]; 64968d75effSDimitry Andric hdr->epoch0 = thr->fast_state.epoch(); 65068d75effSDimitry Andric ObtainCurrentStack(thr, 0, &hdr->stack0); 65168d75effSDimitry Andric hdr->mset0 = thr->mset; 65268d75effSDimitry Andric thr->nomalloc--; 65368d75effSDimitry Andric } 65468d75effSDimitry Andric 655*349cc55cSDimitry Andric Trace *ThreadTrace(Tid tid) { return (Trace *)GetThreadTraceHeader(tid); } 65668d75effSDimitry Andric 65768d75effSDimitry Andric uptr TraceTopPC(ThreadState *thr) { 65868d75effSDimitry Andric Event *events = (Event*)GetThreadTrace(thr->tid); 65968d75effSDimitry Andric uptr pc = events[thr->fast_state.GetTracePos()]; 66068d75effSDimitry Andric return pc; 66168d75effSDimitry Andric } 66268d75effSDimitry Andric 66368d75effSDimitry Andric uptr TraceSize() { 66468d75effSDimitry Andric return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1)); 66568d75effSDimitry Andric } 66668d75effSDimitry Andric 66768d75effSDimitry Andric uptr TraceParts() { 66868d75effSDimitry Andric return TraceSize() / kTracePartSize; 66968d75effSDimitry Andric } 67068d75effSDimitry Andric 67168d75effSDimitry Andric #if !SANITIZER_GO 67268d75effSDimitry 
Andric extern "C" void __tsan_trace_switch() { 67368d75effSDimitry Andric TraceSwitch(cur_thread()); 67468d75effSDimitry Andric } 67568d75effSDimitry Andric 67668d75effSDimitry Andric extern "C" void __tsan_report_race() { 67768d75effSDimitry Andric ReportRace(cur_thread()); 67868d75effSDimitry Andric } 67968d75effSDimitry Andric #endif 68068d75effSDimitry Andric 681*349cc55cSDimitry Andric void ThreadIgnoreBegin(ThreadState *thr, uptr pc) { 68268d75effSDimitry Andric DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid); 68368d75effSDimitry Andric thr->ignore_reads_and_writes++; 68468d75effSDimitry Andric CHECK_GT(thr->ignore_reads_and_writes, 0); 68568d75effSDimitry Andric thr->fast_state.SetIgnoreBit(); 68668d75effSDimitry Andric #if !SANITIZER_GO 687*349cc55cSDimitry Andric if (pc && !ctx->after_multithreaded_fork) 68868d75effSDimitry Andric thr->mop_ignore_set.Add(CurrentStackId(thr, pc)); 68968d75effSDimitry Andric #endif 69068d75effSDimitry Andric } 69168d75effSDimitry Andric 692*349cc55cSDimitry Andric void ThreadIgnoreEnd(ThreadState *thr) { 69368d75effSDimitry Andric DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid); 69468d75effSDimitry Andric CHECK_GT(thr->ignore_reads_and_writes, 0); 69568d75effSDimitry Andric thr->ignore_reads_and_writes--; 69668d75effSDimitry Andric if (thr->ignore_reads_and_writes == 0) { 69768d75effSDimitry Andric thr->fast_state.ClearIgnoreBit(); 69868d75effSDimitry Andric #if !SANITIZER_GO 69968d75effSDimitry Andric thr->mop_ignore_set.Reset(); 70068d75effSDimitry Andric #endif 70168d75effSDimitry Andric } 70268d75effSDimitry Andric } 70368d75effSDimitry Andric 70468d75effSDimitry Andric #if !SANITIZER_GO 70568d75effSDimitry Andric extern "C" SANITIZER_INTERFACE_ATTRIBUTE 70668d75effSDimitry Andric uptr __tsan_testonly_shadow_stack_current_size() { 70768d75effSDimitry Andric ThreadState *thr = cur_thread(); 70868d75effSDimitry Andric return thr->shadow_stack_pos - thr->shadow_stack; 70968d75effSDimitry Andric } 71068d75effSDimitry Andric #endif 

// Begins a scope in which synchronization operations of this thread are
// ignored. Counted (nestable), mirroring ThreadIgnoreBegin.
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  // After fork CurrentStackId is not safe to call (see fork handling above).
  if (pc && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

// Ends a sync-ignore scope; drops the recorded sites at zero nesting.
void ThreadIgnoreSyncEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

// Two 64-bit halves compared for full 128-bit equality.
bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

// Exactly one of these empty functions is emitted per build mode;
// presumably referenced elsewhere so the linker flags a debug/release
// mismatch between the runtime and its users -- TODO confirm.
#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

}  // namespace __tsan

#if SANITIZER_CHECK_DEADLOCKS
namespace __sanitizer {
using namespace __tsan;
// Static lock-order table for the internal deadlock checker: each entry
// names a runtime mutex type and the set of mutexes that may already be
// held when it is acquired (MutexLeaf = no further mutexes under it).
// Terminated by an empty entry.
MutexMeta mutex_meta[] = {
    {MutexInvalid, "Invalid", {}},
    {MutexThreadRegistry, "ThreadRegistry", {}},
    {MutexTypeTrace, "Trace", {MutexLeaf}},
    {MutexTypeReport, "Report", {MutexTypeSyncVar}},
    {MutexTypeSyncVar, "SyncVar", {}},
    {MutexTypeAnnotations, "Annotations", {}},
    {MutexTypeAtExit, "AtExit", {MutexTypeSyncVar}},
    {MutexTypeFired, "Fired", {MutexLeaf}},
    {MutexTypeRacy, "Racy", {MutexLeaf}},
    {MutexTypeGlobalProc, "GlobalProc", {}},
    {},
};

// Symbolizes and prints the single pc where a mutex was acquired.
void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }
}  // namespace __sanitizer
#endif