//===-- tsan_rtl.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"

#ifdef __SSE3__
// <emmintrin.h> transitively includes <stdlib.h>,
// and it's prohibited to include std headers into tsan runtime.
// So we do this dirty trick.
#define _MM_MALLOC_H_INCLUDED
#define __MM_MALLOC_H
#include <emmintrin.h>
typedef __m128i m128;
#endif

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#if !SANITIZER_GO && !SANITIZER_MAC
__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
Context *ctx;

// Can be overridden by a front-end.
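// Without TSAN_EXTERNAL_HOOKS the defaults below are weak
// (SANITIZER_WEAK_CXX_DEFAULT_IMPL), so an embedder can simply link strong
// definitions. A minimal sketch of such an override (hypothetical code that
// would live in the embedder, not in this file):
//
//   namespace __tsan {
//   bool OnFinalize(bool failed) {
//     return false;  // E.g. force a zero exit code despite reports.
//   }
//   void OnInitialize() { /* custom startup logic */ }
//   }  // namespace __tsan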
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
  return failed;
}
SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {}
#endif

static char thread_registry_placeholder[sizeof(ThreadRegistry)];

static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  char name[50];
  internal_snprintf(name, sizeof(name), "trace %u", tid);
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
  const uptr hdr = GetThreadTraceHeader(tid);
  internal_snprintf(name, sizeof(name), "trace header %u", tid);
  MapThreadTrace(hdr, sizeof(Trace), name);
  new((void*)hdr) Trace();
  // We are going to use only a small part of the trace with the default
  // value of history_size. However, the constructor writes to the whole trace.
  // Unmap the unused part.
  uptr hdr_end = hdr + sizeof(Trace);
  hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
  hdr_end = RoundUp(hdr_end, GetPageSizeCached());
  if (hdr_end < hdr + sizeof(Trace))
    UnmapOrDie((void*)hdr_end, hdr + sizeof(Trace) - hdr_end);
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}

#if !SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
      CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
  , racy_mtx(MutexTypeRacy, StatMtxRacy)
  , racy_stacks()
  , racy_addresses()
  , fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
  , clock_alloc("clock allocator") {
  fired_suppressions.reserve(8);
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
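// (This matters for cur_thread_placeholder above: every field deliberately
// omitted from the initializer list below must be usable at its zero value.)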
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         unsigned reuse_count,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
  : fast_state(tid, epoch)
  // Do not touch these, rely on zero initialization,
  // they may be accessed before the ctor.
  // , ignore_reads_and_writes()
  // , ignore_interceptors()
  , clock(tid, reuse_count)
#if !SANITIZER_GO
  , jmp_bufs()
#endif
  , tid(tid)
  , unique_id(unique_id)
  , stk_addr(stk_addr)
  , stk_size(stk_size)
  , tls_addr(tls_addr)
  , tls_size(tls_size)
#if !SANITIZER_GO
  , last_sleep_clock(tid)
#endif
{
}

#if !SANITIZER_GO
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
  WriteToFile(fd, buf.data(), internal_strlen(buf.data()));
}

static void *BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
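  // Note that the counter bumped below is never decremented: interceptors
  // stay ignored for the entire lifetime of this thread.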
  cur_thread_init();
  cur_thread()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;

  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
      mprof_fd = 1;
    } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
      mprof_fd = 2;
    } else {
      InternalScopedString filename(kMaxPathLength);
      filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
      fd_t fd = OpenFile(filename.data(), WrOnly);
      if (fd == kInvalidFd) {
        Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
               &filename[0]);
      } else {
        mprof_fd = fd;
      }
    }
  }

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0;
       atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
       i++) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    // GetRSS can be expensive on huge programs, so don't do it every 100ms.
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VPrintf(1, "ThreadSanitizer: memory flush check"
                 " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
      }
      last_rss = rss;
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

    // Flush symbolizer cache if requested.
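    // Presumably both locks below are needed so that the cache is never
    // flushed while another thread is in the middle of symbolizing a report.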
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
  return nullptr;
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef __mips__
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
#endif

void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(MemToShadow(addr), MemToShadow(addr + size));
}

#if !SANITIZER_GO
void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
  if (size == 0) return;
  DontNeedShadowFor(addr, size);
  ScopedGlobalProcessor sgp;
  ctx->metamap.ResetRange(thr->proc(), addr, size);
}
#endif

void MapShadow(uptr addr, uptr size) {
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                               "shadow"))
    Die();

  // Meta shadow is 2:1, so tread carefully.
  static bool data_mapped = false;
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
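    // (Presumably data+bss are mapped once at startup; later calls extend the
    // heap meta shadow incrementally, tracked by mapped_meta_end below.)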
    data_mapped = true;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    if (meta_end <= mapped_meta_end)
      return;
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
      addr, addr+size, meta_begin, meta_end);
}

void MapThreadTrace(uptr addr, uptr size, const char *name) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, TraceMemBeg());
  CHECK_LE(addr + size, TraceMemEnd());
  CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  if (!MmapFixedSuperNoReserve(addr, size, name)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p)\n",
        addr, size);
    Die();
  }
}

static void CheckShadowMapping() {
  uptr beg, end;
  for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
    // Skip cases for empty regions (heap definition for architectures that
    // do not use 64-bit allocator).
    if (beg == end)
      continue;
    VPrintf(3, "checking shadow region %p-%p\n", beg, end);
    uptr prev = 0;
    for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
      for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
        const uptr p = RoundDown(p0 + x, kShadowCell);
        if (p < beg || p >= end)
          continue;
        const uptr s = MemToShadow(p);
        const uptr m = (uptr)MemToMeta(p);
        VPrintf(3, "  checking pointer %p: shadow=%p meta=%p\n", p, s, m);
        CHECK(IsAppMem(p));
        CHECK(IsShadowMem(s));
        CHECK_EQ(p, ShadowToMem(s));
        CHECK(IsMetaMem(m));
        if (prev) {
          // Ensure that shadow and meta mappings are linear within a single
          // user range. Lots of code that processes memory ranges assumes it.
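          // E.g. user addresses 16 bytes apart must map to shadow addresses
          // exactly 16 * kShadowMultiplier bytes apart, and to meta shadow
          // addresses 16 / kMetaShadowCell * kMetaShadowSize bytes apart.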
          const uptr prev_s = MemToShadow(prev);
          const uptr prev_m = (uptr)MemToMeta(prev);
          CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
          CHECK_EQ((m - prev_m) / kMetaShadowSize,
                   (p - prev) / kMetaShadowCell);
        }
        prev = p;
      }
    }
  }
}

#if !SANITIZER_GO
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
#endif

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ctx = new(ctx_placeholder) Context;
  const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
  const char *options = GetEnv(env_name);
  CacheBinaryName();
  CheckASLR();
  InitializeFlags(&ctx->flags, options, env_name);
  AvoidCVE_2016_2143();
  __sanitizer::InitializePlatformEarly();
  __tsan::InitializePlatformEarly();

#if !SANITIZER_GO
  // Re-exec ourselves if we need to set additional env or command line args.
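  // (E.g. on Mac the runtime may re-exec so that DYLD_INSERT_LIBRARIES is
  // set up correctly.)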
  MaybeReexec();

  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  CheckShadowMapping();
  InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif
  ctx->initialized = true;

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
#endif

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}

void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we can not spawn
  // new threads.
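  // The relaxed load below is only a fast path; the atomic exchange is what
  // guarantees that exactly one caller spawns the thread.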
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}


int Finalize(ThreadState *thr) {
  bool failed = false;

  if (common_flags()->print_module_map == 1)
    DumpProcessMap();

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  { ScopedErrorReportLock l; }
  ctx->report_mtx.Unlock();

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
#if !SANITIZER_GO
  if (flags()->print_benign)
    PrintMatchedBenignRaces();
#endif

  failed = OnFinalize(failed);

#if TSAN_COLLECT_STATS
  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
#endif

  return failed ? common_flags()->exitcode : 0;
}

#if !SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) {
  ctx->thread_registry->Lock();
  ctx->report_mtx.Lock();
  // Ignore memory accesses in the pthread_atfork callbacks.
  // If any of them triggers a data race we will deadlock
  // on the report_mtx.
  // We could ignore interceptors and sync operations as well,
  // but so far it's unclear if it will do more good or harm.
  // Unnecessarily ignoring things can lead to false positives later.
  ThreadIgnoreBegin(thr, pc);
}

void ForkParentAfter(ThreadState *thr, uptr pc) {
  ThreadIgnoreEnd(thr, pc);  // Begin is in ForkBefore.
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();
}

void ForkChildAfter(ThreadState *thr, uptr pc) {
  ThreadIgnoreEnd(thr, pc);  // Begin is in ForkBefore.
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();

  uptr nthread = 0;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
  VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
      " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif

#if SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
      newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  internal_free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (!thr->is_inited)  // May happen during bootstrap.
    return 0;
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}

void TraceSwitch(ThreadState *thr) {
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork)
    return;
#endif
  thr->nomalloc++;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  ObtainCurrentStack(thr, 0, &hdr->stack0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

#if !SANITIZER_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

ALWAYS_INLINE
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}
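
// All shadow accesses above are relaxed atomics: each 64-bit shadow word is
// loaded/stored atomically, and observing a stale word during a concurrent
// update presumably costs at most a missed or duplicated report.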

ALWAYS_INLINE
void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}

ALWAYS_INLINE
void HandleRace(ThreadState *thr, u64 *shadow_mem,
                Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#if !SANITIZER_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

ALWAYS_INLINE
void MemoryAccessImpl1(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();
  bool stored = false;

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // We consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. Initially I considered
  // larger and smaller as well, it allowed replacing some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);

  // In release mode we manually unroll the loop,
  // because empirically gcc generates better code this way.
  // However, we can't afford unrolling in debug mode, because the function
  // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
  // threads, which is not enough for the unrolled loop.
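  // Roughly, each inclusion of tsan_update_shadow_word_inl.h expands to a
  // per-slot step of the following shape (a sketch; see that header for the
  // exact logic):
  //   Shadow old(LoadShadow(&shadow_mem[idx]));
  //   if slot is empty              -> store cur, stored = true
  //   if same access by same thread -> done
  //   if accesses don't intersect   -> keep scanning
  //   if old happens-before cur     -> replacement candidate
  //   otherwise                     -> goto RACE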
#if SANITIZER_DEBUG
  for (int idx = 0; idx < 4; idx++) {
#include "tsan_update_shadow_word_inl.h"
  }
#else
  int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  idx = 1;
  if (stored) {
#include "tsan_update_shadow_word_inl.h"
  } else {
#include "tsan_update_shadow_word_inl.h"
  }
  idx = 2;
  if (stored) {
#include "tsan_update_shadow_word_inl.h"
  } else {
#include "tsan_update_shadow_word_inl.h"
  }
  idx = 3;
  if (stored) {
#include "tsan_update_shadow_word_inl.h"
  } else {
#include "tsan_update_shadow_word_inl.h"
  }
#endif

  // We did not find any races and had already stored
  // the current access info, so we are done.
  if (LIKELY(stored))
    return;
  // Choose a random candidate slot and replace it.
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}

ALWAYS_INLINE
bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  Shadow cur(a);
  for (uptr i = 0; i < kShadowCnt; i++) {
    Shadow old(LoadShadow(&s[i]));
    if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
        old.TidWithIgnore() == cur.TidWithIgnore() &&
        old.epoch() > sync_epoch &&
        old.IsAtomic() == cur.IsAtomic() &&
        old.IsRead() <= cur.IsRead())
      return true;
  }
  return false;
}

#if defined(__SSE3__)
#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
    _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
    (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
ALWAYS_INLINE
bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  // This is an optimized version of ContainsSameAccessSlow.
  // load current access into access[0:63]
  const m128 access = _mm_cvtsi64_si128(a);
  // duplicate high part of access in addr0:
  // addr0[0:31]   = access[32:63]
  // addr0[32:63]  = access[32:63]
  // addr0[64:95]  = access[32:63]
  // addr0[96:127] = access[32:63]
  const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
  // load 4 shadow slots
  const m128 shadow0 = _mm_load_si128((__m128i*)s);
  const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
  // load high parts of 4 shadow slots into addr_vect:
  // addr_vect[0:31]   = shadow0[32:63]
  // addr_vect[32:63]  = shadow0[96:127]
  // addr_vect[64:95]  = shadow1[32:63]
  // addr_vect[96:127] = shadow1[96:127]
  m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
  if (!is_write) {
    // set IsRead bit in addr_vect
    const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
    const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
    addr_vect = _mm_or_si128(addr_vect, rw_mask);
  }
  // addr0 == addr_vect?
  const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
  // epoch1[0:63] = sync_epoch
  const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
  // epoch[0:31]   = sync_epoch[0:31]
  // epoch[32:63]  = sync_epoch[0:31]
  // epoch[64:95]  = sync_epoch[0:31]
  // epoch[96:127] = sync_epoch[0:31]
  const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
  // load low parts of shadow cell epochs into epoch_vect:
  // epoch_vect[0:31]   = shadow0[0:31]
  // epoch_vect[32:63]  = shadow0[64:95]
  // epoch_vect[64:95]  = shadow1[0:31]
  // epoch_vect[96:127] = shadow1[64:95]
  const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
  // epoch_vect > sync_epoch?
  const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
  // addr_res & epoch_res
  const m128 res = _mm_and_si128(addr_res, epoch_res);
  // mask[0] = res[7]
  // mask[1] = res[15]
  // ...
  // mask[15] = res[127]
  const int mask = _mm_movemask_epi8(res);
  return mask != 0;
}
#endif

ALWAYS_INLINE
bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
#if defined(__SSE3__)
  bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
  // NOTE: this check can fail if the shadow is concurrently mutated
  // by other threads. But it still can be useful if you modify
  // ContainsSameAccessFast and want to ensure that it's not completely broken.
  // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
  return res;
#else
  return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
#endif
}

ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if SANITIZER_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  if (!SANITIZER_GO && !kAccessIsWrite && *shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (UNLIKELY(fast_state.GetIgnoreBit())) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopIgnored);
    return;
  }

  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  if (kCollectHistory) {
    fast_state.IncrementEpoch();
    thr->fast_state = fast_state;
    TraceAddEvent(thr, fast_state, EventTypeMop, pc);
    cur.IncrementEpoch();
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

// Called by MemoryAccessRange in tsan_rtl_thread.cpp
ALWAYS_INLINE USED
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  // FIXME: fix me.
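  // Round the start of the range up to a shadow cell boundary (kShadowCell
  // application bytes); e.g. with addr % kShadowCell == 3, the first
  // kShadowCell - 3 bytes of the range are skipped.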
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows.
  if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = GetPageSizeCached();
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    if (!MmapFixedSuperNoReserve((uptr)p1, (uptr)p - (uptr)p1))
      Die();
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (user does not necessarily touch
  // the whole range) and most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
                                         uptr size) {
  if (thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, addr, size);
  else
    MemoryResetRange(thr, pc, addr, size);
}

ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
  }

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end)
    GrowShadowStack(thr);
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
  }

  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  thr->shadow_stack_pos--;
}

void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  if (save_stack && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

#if !SANITIZER_GO
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __tsan_testonly_shadow_stack_current_size() {
  ThreadState *thr = cur_thread();
  return thr->shadow_stack_pos - thr->shadow_stack;
}
#endif

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (save_stack && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

}  // namespace __tsan

#if !SANITIZER_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif