//===-- tsan_rtl.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"

#ifdef __SSE3__
// <emmintrin.h> transitively includes <stdlib.h>,
// and it's prohibited to include std headers into tsan runtime.
// So we do this dirty trick.
#define _MM_MALLOC_H_INCLUDED
#define __MM_MALLOC_H
#include <emmintrin.h>
typedef __m128i m128;
#endif

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#if !SANITIZER_GO && !SANITIZER_MAC
__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
Context *ctx;
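// ctx is constructed via placement new into ctx_placeholder (see
// Initialize below), presumably so that no dynamic allocation is needed
// before the allocator itself is initialized.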
// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
  return failed;
}
SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {}
#endif

static char thread_registry_placeholder[sizeof(ThreadRegistry)];

static ThreadContextBase *CreateThreadContext(u32 tid) {
  // Map thread trace when context is created.
  char name[50];
  internal_snprintf(name, sizeof(name), "trace %u", tid);
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
  const uptr hdr = GetThreadTraceHeader(tid);
  internal_snprintf(name, sizeof(name), "trace header %u", tid);
  MapThreadTrace(hdr, sizeof(Trace), name);
  new((void*)hdr) Trace();
  // We are going to use only a small part of the trace with the default
  // value of history_size. However, the constructor writes to the whole trace.
  // Unmap the unused part.
  uptr hdr_end = hdr + sizeof(Trace);
  hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
  hdr_end = RoundUp(hdr_end, GetPageSizeCached());
  if (hdr_end < hdr + sizeof(Trace))
    UnmapOrDie((void*)hdr_end, hdr + sizeof(Trace) - hdr_end);
  void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
  return new(mem) ThreadContext(tid);
}
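// For illustration of the trimming above: only TraceParts() of the
// kTraceParts TraceHeaders are used with the current history_size, so the
// tail sizeof(TraceHeader) * (kTraceParts - TraceParts()) bytes of the
// Trace object, starting at the first page boundary past the last used
// header, are returned to the OS.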
#if !SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
  : initialized()
  , report_mtx(MutexTypeReport, StatMtxReport)
  , nreported()
  , nmissed_expected()
  , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
      CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
  , racy_mtx(MutexTypeRacy, StatMtxRacy)
  , racy_stacks()
  , racy_addresses()
  , fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
  , clock_alloc("clock allocator") {
  fired_suppressions.reserve(8);
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
                         unsigned reuse_count,
                         uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
    : fast_state(tid, epoch)
    // Do not touch these, rely on zero initialization,
    // they may be accessed before the ctor.
    // , ignore_reads_and_writes()
    // , ignore_interceptors()
    , clock(tid, reuse_count)
#if !SANITIZER_GO
    , jmp_bufs()
#endif
    , tid(tid)
    , unique_id(unique_id)
    , stk_addr(stk_addr)
    , stk_size(stk_size)
    , tls_addr(tls_addr)
    , tls_size(tls_size)
#if !SANITIZER_GO
    , last_sleep_clock(tid)
#endif
{
}

#if !SANITIZER_GO
static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
  uptr n_threads;
  uptr n_running_threads;
  ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
  WriteToFile(fd, buf.data(), internal_strlen(buf.data()));
}
static void BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread_init();
  cur_thread()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;

  fd_t mprof_fd = kInvalidFd;
  if (flags()->profile_memory && flags()->profile_memory[0]) {
    if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
      mprof_fd = 1;
    } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
      mprof_fd = 2;
    } else {
      InternalScopedString filename(kMaxPathLength);
      filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
      fd_t fd = OpenFile(filename.data(), WrOnly);
      if (fd == kInvalidFd) {
        Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
            &filename[0]);
      } else {
        mprof_fd = fd;
      }
    }
  }

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0;
      atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
      i++) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    // GetRSS can be expensive on huge programs, so don't do it every 100ms.
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VPrintf(1, "ThreadSanitizer: memory flush check"
                 " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
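      // 2*rss > limit + last_rss is equivalent to rss - last_rss > limit - rss:
      // flush when the RSS growth since the previous iteration exceeds the
      // remaining headroom below the limit, i.e. when the limit would be
      // crossed within roughly one more interval at the current growth rate.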
      if (2 * rss > limit + last_rss) {
        VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
      }
      last_rss = rss;
    }

    // Write memory profile if requested.
    if (mprof_fd != kInvalidFd)
      MemoryProfiler(ctx, mprof_fd, i);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef __mips__
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
#endif

void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(MemToShadow(addr), MemToShadow(addr + size));
}

#if !SANITIZER_GO
void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
  if (size == 0) return;
  DontNeedShadowFor(addr, size);
  ScopedGlobalProcessor sgp;
  ctx->metamap.ResetRange(thr->proc(), addr, size);
}
#endif
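// Meta shadow (sync object and heap block descriptors) is mapped at a 2:1
// compression, so distinct app ranges can land on the same meta shadow
// pages; MapShadow therefore tracks what has already been mapped instead
// of blindly remapping.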
void MapShadow(uptr addr, uptr size) {
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
    Die();

  // Meta shadow is 2:1, so tread carefully.
  static bool data_mapped = false;
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
      Die();
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    if (meta_end <= mapped_meta_end)
      return;
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
      addr, addr+size, meta_begin, meta_end);
}

void MapThreadTrace(uptr addr, uptr size, const char *name) {
  DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, TraceMemBeg());
  CHECK_LE(addr + size, TraceMemEnd());
  CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  if (!MmapFixedNoReserve(addr, size, name)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p)\n",
        addr, size);
    Die();
  }
}
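// Startup self-check: on sample points of every user region, verifies that
// the app -> shadow and app -> meta transforms produce valid addresses,
// that shadow maps back to the same app address, and that both mappings
// are linear within the region.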
static void CheckShadowMapping() {
  uptr beg, end;
  for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
    // Skip cases for empty regions (heap definition for architectures that
    // do not use 64-bit allocator).
    if (beg == end)
      continue;
    VPrintf(3, "checking shadow region %p-%p\n", beg, end);
    uptr prev = 0;
    for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
      for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
        const uptr p = RoundDown(p0 + x, kShadowCell);
        if (p < beg || p >= end)
          continue;
        const uptr s = MemToShadow(p);
        const uptr m = (uptr)MemToMeta(p);
        VPrintf(3, "  checking pointer %p: shadow=%p meta=%p\n", p, s, m);
        CHECK(IsAppMem(p));
        CHECK(IsShadowMem(s));
        CHECK_EQ(p, ShadowToMem(s));
        CHECK(IsMetaMem(m));
        if (prev) {
          // Ensure that shadow and meta mappings are linear within a single
          // user range. Lots of code that processes memory ranges assumes it.
          const uptr prev_s = MemToShadow(prev);
          const uptr prev_m = (uptr)MemToMeta(prev);
          CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
          CHECK_EQ((m - prev_m) / kMetaShadowSize,
                   (p - prev) / kMetaShadowCell);
        }
        prev = p;
      }
    }
  }
}
#if !SANITIZER_GO
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
#endif

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  static bool is_initialized = false;
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckFailedCallback(TsanCheckFailed);

  ctx = new(ctx_placeholder) Context;
  const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
  const char *options = GetEnv(env_name);
  CacheBinaryName();
  CheckASLR();
  InitializeFlags(&ctx->flags, options, env_name);
  AvoidCVE_2016_2143();
  __sanitizer::InitializePlatformEarly();
  __tsan::InitializePlatformEarly();

#if !SANITIZER_GO
  // Re-exec ourselves if we need to set additional env or command line args.
  MaybeReexec();

  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  CheckShadowMapping();
  InitializePlatform();
  InitializeMutex();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  int tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, 0);
  ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif
  ctx->initialized = true;

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
#endif

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}
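// Spawns the background thread at most once. The atomic exchange makes
// this safe against concurrent callers: only the caller that flips
// bg_thread from 0 to 1 actually starts the thread.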
void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we can not spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}


int Finalize(ThreadState *thr) {
  bool failed = false;

  if (common_flags()->print_module_map == 1) PrintModuleMap();

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  { ScopedErrorReportLock l; }
  ctx->report_mtx.Unlock();

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (ctx->nmissed_expected) {
    failed = true;
    Printf("ThreadSanitizer: missed %d expected races\n",
        ctx->nmissed_expected);
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
#if !SANITIZER_GO
  if (flags()->print_benign)
    PrintMatchedBenignRaces();
#endif

  failed = OnFinalize(failed);

#if TSAN_COLLECT_STATS
  StatAggregate(ctx->stat, thr->stat);
  StatOutput(ctx->stat);
#endif

  return failed ? common_flags()->exitcode : 0;
}
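// fork() support. The locks below are held across fork() so that the child
// does not inherit them in a locked state: only the forking thread exists
// in the child, so a mutex locked by any other thread would never be
// unlocked again.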
#if !SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) {
  ctx->thread_registry->Lock();
  ctx->report_mtx.Lock();
}

void ForkParentAfter(ThreadState *thr, uptr pc) {
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();
}

void ForkChildAfter(ThreadState *thr, uptr pc) {
  ctx->report_mtx.Unlock();
  ctx->thread_registry->Unlock();

  uptr nthread = 0;
  ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
  VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
      " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif

#if SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
      newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  internal_free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif
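// Returns a stack depot id for the current shadow call stack. A non-zero
// pc is temporarily pushed so that it appears as the top frame of the
// collected stack, and is popped again before returning.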
u32 CurrentStackId(ThreadState *thr, uptr pc) {
  if (!thr->is_inited)  // May happen during bootstrap.
    return 0;
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  u32 id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}

void TraceSwitch(ThreadState *thr) {
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork)
    return;
#endif
  thr->nomalloc++;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  ObtainCurrentStack(thr, 0, &hdr->stack0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(int tid) {
  return (Trace*)GetThreadTraceHeader(tid);
}

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event*)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

#if !SANITIZER_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

ALWAYS_INLINE
Shadow LoadShadow(u64 *p) {
  u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
  return Shadow(raw);
}

ALWAYS_INLINE
void StoreShadow(u64 *sp, u64 s) {
  atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
}

ALWAYS_INLINE
void StoreIfNotYetStored(u64 *sp, u64 *s) {
  StoreShadow(sp, *s);
  *s = 0;
}
ALWAYS_INLINE
void HandleRace(ThreadState *thr, u64 *shadow_mem,
                Shadow cur, Shadow old) {
  thr->racy_state[0] = cur.raw();
  thr->racy_state[1] = old.raw();
  thr->racy_shadow_addr = shadow_mem;
#if !SANITIZER_GO
  HACKY_CALL(__tsan_report_race);
#else
  ReportRace(thr);
#endif
}

static inline bool HappensBefore(Shadow old, ThreadState *thr) {
  return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
}

ALWAYS_INLINE
void MemoryAccessImpl1(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  StatInc(thr, StatMop);
  StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
  StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));

  // This potentially can live in an MMX/SSE scratch register.
  // The required intrinsics are:
  // __m128i _mm_move_epi64(__m128i*);
  // _mm_storel_epi64(u64*, __m128i);
  u64 store_word = cur.raw();
  bool stored = false;

  // Scan all the shadow values and dispatch to 4 categories:
  // same, replace, candidate and race (see comments below).
  // We consider only 3 cases regarding access sizes:
  // equal, intersect and not intersect. Initially I considered
  // larger and smaller as well, it allowed replacing some
  // 'candidates' with 'same' or 'replace', but I think
  // it's just not worth it (performance- and complexity-wise).

  Shadow old(0);
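  // Each textual inclusion of tsan_update_shadow_word_inl.h below examines
  // one shadow slot: it may recognize the old value as the same access or
  // one that happens-before the current access, store the new shadow value
  // (setting `stored`), or jump to the RACE label at the end of the
  // function.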
  // In release mode we manually unroll the loop,
  // because empirically gcc generates better code this way.
  // However, we can't afford unrolling in debug mode, because the function
  // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
  // threads, which is not enough for the unrolled loop.
#if SANITIZER_DEBUG
  for (int idx = 0; idx < 4; idx++) {
#include "tsan_update_shadow_word_inl.h"
  }
#else
  int idx = 0;
#include "tsan_update_shadow_word_inl.h"
  idx = 1;
  if (stored) {
#include "tsan_update_shadow_word_inl.h"
  } else {
#include "tsan_update_shadow_word_inl.h"
  }
  idx = 2;
  if (stored) {
#include "tsan_update_shadow_word_inl.h"
  } else {
#include "tsan_update_shadow_word_inl.h"
  }
  idx = 3;
  if (stored) {
#include "tsan_update_shadow_word_inl.h"
  } else {
#include "tsan_update_shadow_word_inl.h"
  }
#endif

  // We did not find any races and had already stored
  // the current access info, so we are done.
  if (LIKELY(stored))
    return;
  // Choose a random candidate slot and replace it.
  StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
  StatInc(thr, StatShadowReplace);
  return;
 RACE:
  HandleRace(thr, shadow_mem, cur, old);
  return;
}

void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int size, bool kAccessIsWrite, bool kIsAtomic) {
  while (size) {
    int size1 = 1;
    int kAccessSizeLog = kSizeLog1;
    if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
      size1 = 8;
      kAccessSizeLog = kSizeLog8;
    } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
      size1 = 4;
      kAccessSizeLog = kSizeLog4;
    } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
      size1 = 2;
      kAccessSizeLog = kSizeLog2;
    }
    MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
    addr += size1;
    size -= size1;
  }
}
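// The (addr & ~7) == ((addr + N - 1) & ~7) checks above verify that an
// N-byte sub-access does not cross an 8-byte shadow cell boundary. For
// example, an 8-byte access at address 0x1003 is processed as sub-accesses
// of 4, 1, 2 and 1 bytes.

// Scans the four shadow slots for an earlier access by the same thread,
// to the same offset and of the same size, recorded after the last
// synchronization epoch, with the same atomicity and at least as strong an
// access type; if found, the current access adds no new information.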
ALWAYS_INLINE
bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  Shadow cur(a);
  for (uptr i = 0; i < kShadowCnt; i++) {
    Shadow old(LoadShadow(&s[i]));
    if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
        old.TidWithIgnore() == cur.TidWithIgnore() &&
        old.epoch() > sync_epoch &&
        old.IsAtomic() == cur.IsAtomic() &&
        old.IsRead() <= cur.IsRead())
      return true;
  }
  return false;
}
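// SSE3-accelerated version of ContainsSameAccessSlow: the four shadow
// slots are loaded into two 128-bit registers and the address and epoch
// comparisons are performed for all four candidates in parallel.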
#if defined(__SSE3__)
#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
    _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
    (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
ALWAYS_INLINE
bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
  // This is an optimized version of ContainsSameAccessSlow.
  // load current access into access[0:63]
  const m128 access = _mm_cvtsi64_si128(a);
  // duplicate high part of access in addr0:
  // addr0[0:31]   = access[32:63]
  // addr0[32:63]  = access[32:63]
  // addr0[64:95]  = access[32:63]
  // addr0[96:127] = access[32:63]
  const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
  // load 4 shadow slots
  const m128 shadow0 = _mm_load_si128((__m128i*)s);
  const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
  // load high parts of 4 shadow slots into addr_vect:
  // addr_vect[0:31]   = shadow0[32:63]
  // addr_vect[32:63]  = shadow0[96:127]
  // addr_vect[64:95]  = shadow1[32:63]
  // addr_vect[96:127] = shadow1[96:127]
  m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
  if (!is_write) {
    // set IsRead bit in addr_vect
    const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
    const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
    addr_vect = _mm_or_si128(addr_vect, rw_mask);
  }
  // addr0 == addr_vect?
  const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
  // epoch1[0:63] = sync_epoch
  const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
  // epoch[0:31]   = sync_epoch[0:31]
  // epoch[32:63]  = sync_epoch[0:31]
  // epoch[64:95]  = sync_epoch[0:31]
  // epoch[96:127] = sync_epoch[0:31]
  const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
  // load low parts of shadow cell epochs into epoch_vect:
  // epoch_vect[0:31]   = shadow0[0:31]
  // epoch_vect[32:63]  = shadow0[64:95]
  // epoch_vect[64:95]  = shadow1[0:31]
  // epoch_vect[96:127] = shadow1[64:95]
  const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
  // epoch_vect > sync_epoch?
  const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
  // addr_res & epoch_res
  const m128 res = _mm_and_si128(addr_res, epoch_res);
  // mask[0] = res[7]
  // mask[1] = res[15]
  // ...
  // mask[15] = res[127]
  const int mask = _mm_movemask_epi8(res);
  return mask != 0;
}
#endif

ALWAYS_INLINE
bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
#if defined(__SSE3__)
  bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
  // NOTE: this check can fail if the shadow is concurrently mutated
  // by other threads. But it still can be useful if you modify
  // ContainsSameAccessFast and want to ensure that it's not completely broken.
  // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
  return res;
#else
  return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
#endif
}
ALWAYS_INLINE USED
void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
  u64 *shadow_mem = (u64*)MemToShadow(addr);
  DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
      " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
      (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
      (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
      (uptr)shadow_mem[0], (uptr)shadow_mem[1],
      (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
#if SANITIZER_DEBUG
  if (!IsAppMem(addr)) {
    Printf("Access to non app mem %zx\n", addr);
    DCHECK(IsAppMem(addr));
  }
  if (!IsShadowMem((uptr)shadow_mem)) {
    Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
    DCHECK(IsShadowMem((uptr)shadow_mem));
  }
#endif

  if (!SANITIZER_GO && !kAccessIsWrite && *shadow_mem == kShadowRodata) {
    // Access to .rodata section, no races here.
    // Measurements show that it can be 10-20% of all memory accesses.
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopRodata);
    return;
  }

  FastState fast_state = thr->fast_state;
  if (UNLIKELY(fast_state.GetIgnoreBit())) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopIgnored);
    return;
  }

  Shadow cur(fast_state);
  cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
  cur.SetWrite(kAccessIsWrite);
  cur.SetAtomic(kIsAtomic);

  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  if (kCollectHistory) {
    fast_state.IncrementEpoch();
    thr->fast_state = fast_state;
    TraceAddEvent(thr, fast_state, EventTypeMop, pc);
    cur.IncrementEpoch();
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}

// Called by MemoryAccessRange in tsan_rtl_thread.cpp
ALWAYS_INLINE USED
void MemoryAccessImpl(ThreadState *thr, uptr addr,
    int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
    u64 *shadow_mem, Shadow cur) {
  if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
      thr->fast_synch_epoch, kAccessIsWrite))) {
    StatInc(thr, StatMop);
    StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
    StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
    StatInc(thr, StatMopSame);
    return;
  }

  MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
      shadow_mem, cur);
}
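// Writes val into the first of the kShadowCnt shadow slots of every
// kShadowCell-byte cell in the range and zeroes the remaining slots, so
// that afterwards each cell describes exactly one (synthetic) access.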
static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
                           u64 val) {
  (void)thr;
  (void)pc;
  if (size == 0)
    return;
  // FIXME: fix me.
  uptr offset = addr % kShadowCell;
  if (offset) {
    offset = kShadowCell - offset;
    if (size <= offset)
      return;
    addr += offset;
    size -= offset;
  }
  DCHECK_EQ(addr % 8, 0);
  // If a user passes some insane arguments (memset(0)),
  // let it just crash as usual.
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return;
  // Don't want to touch lots of shadow memory.
  // If a program maps 10MB stack, there is no need to reset the whole range.
  size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
  // UnmapOrDie/MmapFixedNoReserve does not work on Windows.
  if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) {
    u64 *p = (u64*)MemToShadow(addr);
    CHECK(IsShadowMem((uptr)p));
    CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
    // FIXME: may overwrite a part outside the region
    for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
      p[i++] = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        p[i++] = 0;
    }
  } else {
    // The region is big, reset only beginning and end.
    const uptr kPageSize = GetPageSizeCached();
    u64 *begin = (u64*)MemToShadow(addr);
    u64 *end = begin + size / kShadowCell * kShadowCnt;
    u64 *p = begin;
    // Set at least first kPageSize/2 to page boundary.
    while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
    // Reset middle part.
    u64 *p1 = p;
    p = RoundDown(end, kPageSize);
    UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
    if (!MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1))
      Die();
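    // Unmapping and re-mapping the middle pages makes them demand-zero:
    // the OS supplies fresh zeroed pages on the next access, which is much
    // cheaper than explicitly storing zeros over a large range.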
    // Set the ending.
    while (p < end) {
      *p++ = val;
      for (uptr j = 1; j < kShadowCnt; j++)
        *p++ = 0;
    }
  }
}

void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  MemoryRangeSet(thr, pc, addr, size, 0);
}

void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  // Processing more than 1k (4k of shadow) is expensive,
  // can cause excessive memory consumption (user does not necessarily touch
  // the whole range) and most likely unnecessary.
  if (size > 1024)
    size = 1024;
  CHECK_EQ(thr->is_freeing, false);
  thr->is_freeing = true;
  MemoryAccessRange(thr, pc, addr, size, true);
  thr->is_freeing = false;
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.MarkAsFreed();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
  }
  Shadow s(thr->fast_state);
  s.ClearIgnoreBit();
  s.SetWrite(true);
  s.SetAddr0AndSizeLog(0, 3);
  MemoryRangeSet(thr, pc, addr, size, s.raw());
}

void MemoryRangeImitateWriteOrResetRange(ThreadState *thr, uptr pc, uptr addr,
                                         uptr size) {
  if (thr->ignore_reads_and_writes == 0)
    MemoryRangeImitateWrite(thr, pc, addr, size);
  else
    MemoryResetRange(thr, pc, addr, size);
}
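// FuncEntry/FuncExit maintain the per-thread shadow call stack that is used
// to attach stack traces to memory accesses, and log the events into the
// trace when history collection is enabled.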
ALWAYS_INLINE USED
void FuncEntry(ThreadState *thr, uptr pc) {
  StatInc(thr, StatFuncEnter);
  DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
  }

  // Shadow stack maintenance can be replaced with
  // stack unwinding during trace switch (which presumably must be faster).
  DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
  if (thr->shadow_stack_pos == thr->shadow_stack_end)
    GrowShadowStack(thr);
#endif
  thr->shadow_stack_pos[0] = pc;
  thr->shadow_stack_pos++;
}

ALWAYS_INLINE USED
void FuncExit(ThreadState *thr) {
  StatInc(thr, StatFuncExit);
  DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
  if (kCollectHistory) {
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
  }

  DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
#if !SANITIZER_GO
  DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#endif
  thr->shadow_stack_pos--;
}

void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  if (save_stack && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

#if !SANITIZER_GO
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __tsan_testonly_shadow_stack_current_size() {
  ThreadState *thr = cur_thread();
  return thr->shadow_stack_pos - thr->shadow_stack;
}
#endif
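// Like the mop ignores above, sync ignores nest: ignore_sync is a counter,
// and synchronization is ignored until every Begin has been matched by an
// End.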
void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (save_stack && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

#if TSAN_COLLECT_STATS
void build_consistency_stats() {}
#else
void build_consistency_nostats() {}
#endif

}  // namespace __tsan

#if !SANITIZER_GO
// Must be included in this file to make sure everything is inlined.
#include "tsan_interface_inl.h"
#endif