xref: /freebsd-src/contrib/llvm-project/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp (revision 4824e7fd18a1223177218d4aec1b3c6c5c4a444e)
168d75effSDimitry Andric //===-- tsan_rtl.cpp ------------------------------------------------------===//
268d75effSDimitry Andric //
368d75effSDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
468d75effSDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
568d75effSDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
668d75effSDimitry Andric //
768d75effSDimitry Andric //===----------------------------------------------------------------------===//
868d75effSDimitry Andric //
968d75effSDimitry Andric // This file is a part of ThreadSanitizer (TSan), a race detector.
1068d75effSDimitry Andric //
1168d75effSDimitry Andric // Main file (entry points) for the TSan run-time.
1268d75effSDimitry Andric //===----------------------------------------------------------------------===//
1368d75effSDimitry Andric 
14fe6060f1SDimitry Andric #include "tsan_rtl.h"
15fe6060f1SDimitry Andric 
1668d75effSDimitry Andric #include "sanitizer_common/sanitizer_atomic.h"
1768d75effSDimitry Andric #include "sanitizer_common/sanitizer_common.h"
1868d75effSDimitry Andric #include "sanitizer_common/sanitizer_file.h"
1968d75effSDimitry Andric #include "sanitizer_common/sanitizer_libc.h"
2068d75effSDimitry Andric #include "sanitizer_common/sanitizer_placement_new.h"
21fe6060f1SDimitry Andric #include "sanitizer_common/sanitizer_stackdepot.h"
2268d75effSDimitry Andric #include "sanitizer_common/sanitizer_symbolizer.h"
2368d75effSDimitry Andric #include "tsan_defs.h"
24fe6060f1SDimitry Andric #include "tsan_interface.h"
2568d75effSDimitry Andric #include "tsan_mman.h"
26fe6060f1SDimitry Andric #include "tsan_platform.h"
2768d75effSDimitry Andric #include "tsan_suppressions.h"
2868d75effSDimitry Andric #include "tsan_symbolize.h"
2968d75effSDimitry Andric #include "ubsan/ubsan_init.h"
3068d75effSDimitry Andric 
3168d75effSDimitry Andric volatile int __tsan_resumed = 0;
3268d75effSDimitry Andric 
3368d75effSDimitry Andric extern "C" void __tsan_resume() {
3468d75effSDimitry Andric   __tsan_resumed = 1;
3568d75effSDimitry Andric }
3668d75effSDimitry Andric 
37*4824e7fdSDimitry Andric SANITIZER_WEAK_DEFAULT_IMPL
38*4824e7fdSDimitry Andric void __tsan_test_only_on_fork() {}
39*4824e7fdSDimitry Andric 
4068d75effSDimitry Andric namespace __tsan {
4168d75effSDimitry Andric 
42349cc55cSDimitry Andric #if !SANITIZER_GO
43349cc55cSDimitry Andric void (*on_initialize)(void);
44349cc55cSDimitry Andric int (*on_finalize)(int);
45349cc55cSDimitry Andric #endif
46349cc55cSDimitry Andric 
4768d75effSDimitry Andric #if !SANITIZER_GO && !SANITIZER_MAC
4868d75effSDimitry Andric __attribute__((tls_model("initial-exec")))
49349cc55cSDimitry Andric THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(
50349cc55cSDimitry Andric     SANITIZER_CACHE_LINE_SIZE);
5168d75effSDimitry Andric #endif
52349cc55cSDimitry Andric static char ctx_placeholder[sizeof(Context)] ALIGNED(SANITIZER_CACHE_LINE_SIZE);
5368d75effSDimitry Andric Context *ctx;
5468d75effSDimitry Andric 
5568d75effSDimitry Andric // Can be overridden by a front-end.
5668d75effSDimitry Andric #ifdef TSAN_EXTERNAL_HOOKS
5768d75effSDimitry Andric bool OnFinalize(bool failed);
5868d75effSDimitry Andric void OnInitialize();
5968d75effSDimitry Andric #else
60fe6060f1SDimitry Andric #include <dlfcn.h>
6168d75effSDimitry Andric SANITIZER_WEAK_CXX_DEFAULT_IMPL
6268d75effSDimitry Andric bool OnFinalize(bool failed) {
63fe6060f1SDimitry Andric #if !SANITIZER_GO
64349cc55cSDimitry Andric   if (on_finalize)
65349cc55cSDimitry Andric     return on_finalize(failed);
66fe6060f1SDimitry Andric #endif
6768d75effSDimitry Andric   return failed;
6868d75effSDimitry Andric }
6968d75effSDimitry Andric SANITIZER_WEAK_CXX_DEFAULT_IMPL
70fe6060f1SDimitry Andric void OnInitialize() {
71fe6060f1SDimitry Andric #if !SANITIZER_GO
72349cc55cSDimitry Andric   if (on_initialize)
73349cc55cSDimitry Andric     on_initialize();
74fe6060f1SDimitry Andric #endif
75fe6060f1SDimitry Andric }
7668d75effSDimitry Andric #endif
7768d75effSDimitry Andric 
78349cc55cSDimitry Andric static ThreadContextBase *CreateThreadContext(Tid tid) {
7968d75effSDimitry Andric   // Map thread trace when context is created.
8068d75effSDimitry Andric   char name[50];
8168d75effSDimitry Andric   internal_snprintf(name, sizeof(name), "trace %u", tid);
8268d75effSDimitry Andric   MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
8368d75effSDimitry Andric   const uptr hdr = GetThreadTraceHeader(tid);
8468d75effSDimitry Andric   internal_snprintf(name, sizeof(name), "trace header %u", tid);
8568d75effSDimitry Andric   MapThreadTrace(hdr, sizeof(Trace), name);
8668d75effSDimitry Andric   new((void*)hdr) Trace();
8768d75effSDimitry Andric   // We are going to use only a small part of the trace with the default
8868d75effSDimitry Andric   // value of history_size. However, the constructor writes to the whole trace.
89fe6060f1SDimitry Andric   // Release the unused part.
9068d75effSDimitry Andric   uptr hdr_end = hdr + sizeof(Trace);
9168d75effSDimitry Andric   hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
9268d75effSDimitry Andric   hdr_end = RoundUp(hdr_end, GetPageSizeCached());
93fe6060f1SDimitry Andric   if (hdr_end < hdr + sizeof(Trace)) {
94fe6060f1SDimitry Andric     ReleaseMemoryPagesToOS(hdr_end, hdr + sizeof(Trace));
95fe6060f1SDimitry Andric     uptr unused = hdr + sizeof(Trace) - hdr_end;
96fe6060f1SDimitry Andric     if (hdr_end != (uptr)MmapFixedNoAccess(hdr_end, unused)) {
97349cc55cSDimitry Andric       Report("ThreadSanitizer: failed to mprotect [0x%zx-0x%zx)\n", hdr_end,
98349cc55cSDimitry Andric              hdr_end + unused);
99fe6060f1SDimitry Andric       CHECK("unable to mprotect" && 0);
100fe6060f1SDimitry Andric     }
101fe6060f1SDimitry Andric   }
102349cc55cSDimitry Andric   return New<ThreadContext>(tid);
10368d75effSDimitry Andric }
10468d75effSDimitry Andric 
10568d75effSDimitry Andric #if !SANITIZER_GO
10668d75effSDimitry Andric static const u32 kThreadQuarantineSize = 16;
10768d75effSDimitry Andric #else
10868d75effSDimitry Andric static const u32 kThreadQuarantineSize = 64;
10968d75effSDimitry Andric #endif
11068d75effSDimitry Andric 
11168d75effSDimitry Andric Context::Context()
112fe6060f1SDimitry Andric     : initialized(),
113fe6060f1SDimitry Andric       report_mtx(MutexTypeReport),
114fe6060f1SDimitry Andric       nreported(),
115349cc55cSDimitry Andric       thread_registry(CreateThreadContext, kMaxTid, kThreadQuarantineSize,
116349cc55cSDimitry Andric                       kMaxTidReuse),
117fe6060f1SDimitry Andric       racy_mtx(MutexTypeRacy),
118fe6060f1SDimitry Andric       racy_stacks(),
119fe6060f1SDimitry Andric       racy_addresses(),
120fe6060f1SDimitry Andric       fired_suppressions_mtx(MutexTypeFired),
121fe6060f1SDimitry Andric       clock_alloc(LINKER_INITIALIZED, "clock allocator") {
12268d75effSDimitry Andric   fired_suppressions.reserve(8);
12368d75effSDimitry Andric }
12468d75effSDimitry Andric 
12568d75effSDimitry Andric // The objects are allocated in TLS, so one may rely on zero-initialization.
126349cc55cSDimitry Andric ThreadState::ThreadState(Context *ctx, Tid tid, int unique_id, u64 epoch,
127fe6060f1SDimitry Andric                          unsigned reuse_count, uptr stk_addr, uptr stk_size,
12868d75effSDimitry Andric                          uptr tls_addr, uptr tls_size)
12968d75effSDimitry Andric     : fast_state(tid, epoch)
13068d75effSDimitry Andric       // Do not touch these, rely on zero initialization,
13168d75effSDimitry Andric       // they may be accessed before the ctor.
13268d75effSDimitry Andric       // , ignore_reads_and_writes()
13368d75effSDimitry Andric       // , ignore_interceptors()
134fe6060f1SDimitry Andric       ,
135fe6060f1SDimitry Andric       clock(tid, reuse_count)
13668d75effSDimitry Andric #if !SANITIZER_GO
137fe6060f1SDimitry Andric       ,
138fe6060f1SDimitry Andric       jmp_bufs()
13968d75effSDimitry Andric #endif
140fe6060f1SDimitry Andric       ,
141fe6060f1SDimitry Andric       tid(tid),
142fe6060f1SDimitry Andric       unique_id(unique_id),
143fe6060f1SDimitry Andric       stk_addr(stk_addr),
144fe6060f1SDimitry Andric       stk_size(stk_size),
145fe6060f1SDimitry Andric       tls_addr(tls_addr),
146fe6060f1SDimitry Andric       tls_size(tls_size)
14768d75effSDimitry Andric #if !SANITIZER_GO
148fe6060f1SDimitry Andric       ,
149fe6060f1SDimitry Andric       last_sleep_clock(tid)
15068d75effSDimitry Andric #endif
15168d75effSDimitry Andric {
152349cc55cSDimitry Andric   CHECK_EQ(reinterpret_cast<uptr>(this) % SANITIZER_CACHE_LINE_SIZE, 0);
153349cc55cSDimitry Andric #if !SANITIZER_GO
154349cc55cSDimitry Andric   // C/C++ uses a fixed-size shadow stack.
155349cc55cSDimitry Andric   const int kInitStackSize = kShadowStackSize;
156349cc55cSDimitry Andric   shadow_stack = static_cast<uptr *>(
157349cc55cSDimitry Andric       MmapNoReserveOrDie(kInitStackSize * sizeof(uptr), "shadow stack"));
158349cc55cSDimitry Andric   SetShadowRegionHugePageMode(reinterpret_cast<uptr>(shadow_stack),
159349cc55cSDimitry Andric                               kInitStackSize * sizeof(uptr));
160349cc55cSDimitry Andric #else
161349cc55cSDimitry Andric   // Go uses a malloc-allocated shadow stack with dynamic size.
162349cc55cSDimitry Andric   const int kInitStackSize = 8;
163349cc55cSDimitry Andric   shadow_stack = static_cast<uptr *>(Alloc(kInitStackSize * sizeof(uptr)));
164349cc55cSDimitry Andric #endif
165349cc55cSDimitry Andric   shadow_stack_pos = shadow_stack;
166349cc55cSDimitry Andric   shadow_stack_end = shadow_stack + kInitStackSize;
16768d75effSDimitry Andric }
16868d75effSDimitry Andric 
16968d75effSDimitry Andric #if !SANITIZER_GO
170349cc55cSDimitry Andric void MemoryProfiler(u64 uptime) {
171349cc55cSDimitry Andric   if (ctx->memprof_fd == kInvalidFd)
172349cc55cSDimitry Andric     return;
17368d75effSDimitry Andric   InternalMmapVector<char> buf(4096);
174349cc55cSDimitry Andric   WriteMemoryProfile(buf.data(), buf.size(), uptime);
175349cc55cSDimitry Andric   WriteToFile(ctx->memprof_fd, buf.data(), internal_strlen(buf.data()));
176349cc55cSDimitry Andric }
177349cc55cSDimitry Andric 
178349cc55cSDimitry Andric void InitializeMemoryProfiler() {
179349cc55cSDimitry Andric   ctx->memprof_fd = kInvalidFd;
180349cc55cSDimitry Andric   const char *fname = flags()->profile_memory;
181349cc55cSDimitry Andric   if (!fname || !fname[0])
182349cc55cSDimitry Andric     return;
183349cc55cSDimitry Andric   if (internal_strcmp(fname, "stdout") == 0) {
184349cc55cSDimitry Andric     ctx->memprof_fd = 1;
185349cc55cSDimitry Andric   } else if (internal_strcmp(fname, "stderr") == 0) {
186349cc55cSDimitry Andric     ctx->memprof_fd = 2;
187349cc55cSDimitry Andric   } else {
188349cc55cSDimitry Andric     InternalScopedString filename;
189349cc55cSDimitry Andric     filename.append("%s.%d", fname, (int)internal_getpid());
190349cc55cSDimitry Andric     ctx->memprof_fd = OpenFile(filename.data(), WrOnly);
191349cc55cSDimitry Andric     if (ctx->memprof_fd == kInvalidFd) {
192349cc55cSDimitry Andric       Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
193349cc55cSDimitry Andric              filename.data());
194349cc55cSDimitry Andric       return;
195349cc55cSDimitry Andric     }
196349cc55cSDimitry Andric   }
197349cc55cSDimitry Andric   MemoryProfiler(0);
198349cc55cSDimitry Andric   MaybeSpawnBackgroundThread();
19968d75effSDimitry Andric }
20068d75effSDimitry Andric 
2015ffd83dbSDimitry Andric static void *BackgroundThread(void *arg) {
20268d75effSDimitry Andric   // This is a non-initialized non-user thread, nothing to see here.
20368d75effSDimitry Andric   // We don't use ScopedIgnoreInterceptors, because we want ignores to be
20468d75effSDimitry Andric   // enabled even when the thread function exits (e.g. during pthread thread
20568d75effSDimitry Andric   // shutdown code).
206349cc55cSDimitry Andric   cur_thread_init()->ignore_interceptors++;
20768d75effSDimitry Andric   const u64 kMs2Ns = 1000 * 1000;
208349cc55cSDimitry Andric   const u64 start = NanoTime();
20968d75effSDimitry Andric 
21068d75effSDimitry Andric   u64 last_flush = NanoTime();
21168d75effSDimitry Andric   uptr last_rss = 0;
21268d75effSDimitry Andric   for (int i = 0;
21368d75effSDimitry Andric       atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
21468d75effSDimitry Andric       i++) {
21568d75effSDimitry Andric     SleepForMillis(100);
21668d75effSDimitry Andric     u64 now = NanoTime();
21768d75effSDimitry Andric 
21868d75effSDimitry Andric     // Flush memory if requested.
21968d75effSDimitry Andric     if (flags()->flush_memory_ms > 0) {
22068d75effSDimitry Andric       if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
22168d75effSDimitry Andric         VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
22268d75effSDimitry Andric         FlushShadowMemory();
22368d75effSDimitry Andric         last_flush = NanoTime();
22468d75effSDimitry Andric       }
22568d75effSDimitry Andric     }
22668d75effSDimitry Andric     if (flags()->memory_limit_mb > 0) {
22768d75effSDimitry Andric       uptr rss = GetRSS();
22868d75effSDimitry Andric       uptr limit = uptr(flags()->memory_limit_mb) << 20;
22968d75effSDimitry Andric       VPrintf(1, "ThreadSanitizer: memory flush check"
23068d75effSDimitry Andric                  " RSS=%llu LAST=%llu LIMIT=%llu\n",
23168d75effSDimitry Andric               (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
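      // Flush if RSS has grown since the last check by more than the remaining
      // headroom to the limit (2 * rss > limit + last_rss is equivalent to
      // rss - last_rss > limit - rss).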
23268d75effSDimitry Andric       if (2 * rss > limit + last_rss) {
23368d75effSDimitry Andric         VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
23468d75effSDimitry Andric         FlushShadowMemory();
23568d75effSDimitry Andric         rss = GetRSS();
23668d75effSDimitry Andric         VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
23768d75effSDimitry Andric       }
23868d75effSDimitry Andric       last_rss = rss;
23968d75effSDimitry Andric     }
24068d75effSDimitry Andric 
241349cc55cSDimitry Andric     MemoryProfiler(now - start);
24268d75effSDimitry Andric 
24368d75effSDimitry Andric     // Flush symbolizer cache if requested.
24468d75effSDimitry Andric     if (flags()->flush_symbolizer_ms > 0) {
24568d75effSDimitry Andric       u64 last = atomic_load(&ctx->last_symbolize_time_ns,
24668d75effSDimitry Andric                              memory_order_relaxed);
24768d75effSDimitry Andric       if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
24868d75effSDimitry Andric         Lock l(&ctx->report_mtx);
24968d75effSDimitry Andric         ScopedErrorReportLock l2;
25068d75effSDimitry Andric         SymbolizeFlush();
25168d75effSDimitry Andric         atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
25268d75effSDimitry Andric       }
25368d75effSDimitry Andric     }
25468d75effSDimitry Andric   }
2555ffd83dbSDimitry Andric   return nullptr;
25668d75effSDimitry Andric }
25768d75effSDimitry Andric 
25868d75effSDimitry Andric static void StartBackgroundThread() {
25968d75effSDimitry Andric   ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
26068d75effSDimitry Andric }
26168d75effSDimitry Andric 
26268d75effSDimitry Andric #ifndef __mips__
26368d75effSDimitry Andric static void StopBackgroundThread() {
26468d75effSDimitry Andric   atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
26568d75effSDimitry Andric   internal_join_thread(ctx->background_thread);
26668d75effSDimitry Andric   ctx->background_thread = 0;
26768d75effSDimitry Andric }
26868d75effSDimitry Andric #endif
26968d75effSDimitry Andric #endif
27068d75effSDimitry Andric 
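// Releases the shadow memory backing the range [addr, addr+size) back to the OS.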
27168d75effSDimitry Andric void DontNeedShadowFor(uptr addr, uptr size) {
272349cc55cSDimitry Andric   ReleaseMemoryPagesToOS(reinterpret_cast<uptr>(MemToShadow(addr)),
273349cc55cSDimitry Andric                          reinterpret_cast<uptr>(MemToShadow(addr + size)));
27468d75effSDimitry Andric }
27568d75effSDimitry Andric 
27668d75effSDimitry Andric #if !SANITIZER_GO
277*4824e7fdSDimitry Andric // We call UnmapShadow before the actual munmap; at that point we don't yet
278*4824e7fdSDimitry Andric // know if the provided address/size are sane. We can't call UnmapShadow
279*4824e7fdSDimitry Andric // after the actual munmap because at that point the memory range can
280*4824e7fdSDimitry Andric // already be reused for something else, so we can't rely on the munmap
281*4824e7fdSDimitry Andric // return value to tell whether the values are sane.
282*4824e7fdSDimitry Andric // While calling munmap with insane values (non-canonical address, negative
283*4824e7fdSDimitry Andric // size, etc.) is an error, the kernel won't crash. We must also try not to
284*4824e7fdSDimitry Andric // crash, as the failure mode is very confusing (a paging fault inside the
285*4824e7fdSDimitry Andric // runtime on some derived shadow address).
286*4824e7fdSDimitry Andric static bool IsValidMmapRange(uptr addr, uptr size) {
287*4824e7fdSDimitry Andric   if (size == 0)
288*4824e7fdSDimitry Andric     return true;
289*4824e7fdSDimitry Andric   if (static_cast<sptr>(size) < 0)
290*4824e7fdSDimitry Andric     return false;
291*4824e7fdSDimitry Andric   if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
292*4824e7fdSDimitry Andric     return false;
293*4824e7fdSDimitry Andric   // Check that if the start of the region belongs to one of the app ranges,
294*4824e7fdSDimitry Andric   // the end of the region belongs to the same range.
295*4824e7fdSDimitry Andric   const uptr ranges[][2] = {
296*4824e7fdSDimitry Andric       {LoAppMemBeg(), LoAppMemEnd()},
297*4824e7fdSDimitry Andric       {MidAppMemBeg(), MidAppMemEnd()},
298*4824e7fdSDimitry Andric       {HiAppMemBeg(), HiAppMemEnd()},
299*4824e7fdSDimitry Andric   };
300*4824e7fdSDimitry Andric   for (auto range : ranges) {
301*4824e7fdSDimitry Andric     if (addr >= range[0] && addr < range[1])
302*4824e7fdSDimitry Andric       return addr + size <= range[1];
303*4824e7fdSDimitry Andric   }
304*4824e7fdSDimitry Andric   return false;
305*4824e7fdSDimitry Andric }
306*4824e7fdSDimitry Andric 
30768d75effSDimitry Andric void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
308*4824e7fdSDimitry Andric   if (size == 0 || !IsValidMmapRange(addr, size))
309*4824e7fdSDimitry Andric     return;
31068d75effSDimitry Andric   DontNeedShadowFor(addr, size);
31168d75effSDimitry Andric   ScopedGlobalProcessor sgp;
31268d75effSDimitry Andric   ctx->metamap.ResetRange(thr->proc(), addr, size);
31368d75effSDimitry Andric }
31468d75effSDimitry Andric #endif
31568d75effSDimitry Andric 
31668d75effSDimitry Andric void MapShadow(uptr addr, uptr size) {
31768d75effSDimitry Andric   // Global data is not 64K aligned, but there are no adjacent mappings,
31868d75effSDimitry Andric   // so we can get away with unaligned mapping.
31968d75effSDimitry Andric   // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
32068d75effSDimitry Andric   const uptr kPageSize = GetPageSizeCached();
32168d75effSDimitry Andric   uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
32268d75effSDimitry Andric   uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
323e8d8bef9SDimitry Andric   if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
324e8d8bef9SDimitry Andric                                "shadow"))
32568d75effSDimitry Andric     Die();
32668d75effSDimitry Andric 
32768d75effSDimitry Andric   // Meta shadow is 2:1, so tread carefully.
32868d75effSDimitry Andric   static bool data_mapped = false;
32968d75effSDimitry Andric   static uptr mapped_meta_end = 0;
33068d75effSDimitry Andric   uptr meta_begin = (uptr)MemToMeta(addr);
33168d75effSDimitry Andric   uptr meta_end = (uptr)MemToMeta(addr + size);
33268d75effSDimitry Andric   meta_begin = RoundDownTo(meta_begin, 64 << 10);
33368d75effSDimitry Andric   meta_end = RoundUpTo(meta_end, 64 << 10);
33468d75effSDimitry Andric   if (!data_mapped) {
33568d75effSDimitry Andric     // First call maps data+bss.
33668d75effSDimitry Andric     data_mapped = true;
337e8d8bef9SDimitry Andric     if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
338e8d8bef9SDimitry Andric                                  "meta shadow"))
33968d75effSDimitry Andric       Die();
34068d75effSDimitry Andric   } else {
341349cc55cSDimitry Andric     // Mapping contiguous heap.
34268d75effSDimitry Andric     // Windows wants 64K alignment.
34368d75effSDimitry Andric     meta_begin = RoundDownTo(meta_begin, 64 << 10);
34468d75effSDimitry Andric     meta_end = RoundUpTo(meta_end, 64 << 10);
34568d75effSDimitry Andric     if (meta_end <= mapped_meta_end)
34668d75effSDimitry Andric       return;
34768d75effSDimitry Andric     if (meta_begin < mapped_meta_end)
34868d75effSDimitry Andric       meta_begin = mapped_meta_end;
349e8d8bef9SDimitry Andric     if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
350e8d8bef9SDimitry Andric                                  "meta shadow"))
35168d75effSDimitry Andric       Die();
35268d75effSDimitry Andric     mapped_meta_end = meta_end;
35368d75effSDimitry Andric   }
354349cc55cSDimitry Andric   VPrintf(2, "mapped meta shadow for (0x%zx-0x%zx) at (0x%zx-0x%zx)\n", addr,
355349cc55cSDimitry Andric           addr + size, meta_begin, meta_end);
35668d75effSDimitry Andric }
35768d75effSDimitry Andric 
35868d75effSDimitry Andric void MapThreadTrace(uptr addr, uptr size, const char *name) {
359349cc55cSDimitry Andric   DPrintf("#0: Mapping trace at 0x%zx-0x%zx(0x%zx)\n", addr, addr + size, size);
36068d75effSDimitry Andric   CHECK_GE(addr, TraceMemBeg());
36168d75effSDimitry Andric   CHECK_LE(addr + size, TraceMemEnd());
36268d75effSDimitry Andric   CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
363e8d8bef9SDimitry Andric   if (!MmapFixedSuperNoReserve(addr, size, name)) {
364349cc55cSDimitry Andric     Printf("FATAL: ThreadSanitizer can not mmap thread trace (0x%zx/0x%zx)\n",
36568d75effSDimitry Andric            addr, size);
36668d75effSDimitry Andric     Die();
36768d75effSDimitry Andric   }
36868d75effSDimitry Andric }
36968d75effSDimitry Andric 
37068d75effSDimitry Andric #if !SANITIZER_GO
37168d75effSDimitry Andric static void OnStackUnwind(const SignalContext &sig, const void *,
37268d75effSDimitry Andric                           BufferedStackTrace *stack) {
37368d75effSDimitry Andric   stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
37468d75effSDimitry Andric                 common_flags()->fast_unwind_on_fatal);
37568d75effSDimitry Andric }
37668d75effSDimitry Andric 
37768d75effSDimitry Andric static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
37868d75effSDimitry Andric   HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
37968d75effSDimitry Andric }
38068d75effSDimitry Andric #endif
38168d75effSDimitry Andric 
382fe6060f1SDimitry Andric void CheckUnwind() {
383fe6060f1SDimitry Andric   // There is a high probability that interceptors will check-fail as well;
384fe6060f1SDimitry Andric   // on the other hand, there is no sense in processing interceptors
385fe6060f1SDimitry Andric   // since we are going to die soon.
386fe6060f1SDimitry Andric   ScopedIgnoreInterceptors ignore;
387fe6060f1SDimitry Andric #if !SANITIZER_GO
388fe6060f1SDimitry Andric   cur_thread()->ignore_sync++;
389fe6060f1SDimitry Andric   cur_thread()->ignore_reads_and_writes++;
390fe6060f1SDimitry Andric #endif
391fe6060f1SDimitry Andric   PrintCurrentStackSlow(StackTrace::GetCurrentPc());
392fe6060f1SDimitry Andric }
393fe6060f1SDimitry Andric 
394349cc55cSDimitry Andric bool is_initialized;
395349cc55cSDimitry Andric 
39668d75effSDimitry Andric void Initialize(ThreadState *thr) {
39768d75effSDimitry Andric   // Thread safe because done before all threads exist.
39868d75effSDimitry Andric   if (is_initialized)
39968d75effSDimitry Andric     return;
40068d75effSDimitry Andric   is_initialized = true;
40168d75effSDimitry Andric   // We are not ready to handle interceptors yet.
40268d75effSDimitry Andric   ScopedIgnoreInterceptors ignore;
40368d75effSDimitry Andric   SanitizerToolName = "ThreadSanitizer";
40468d75effSDimitry Andric   // Install tool-specific callbacks in sanitizer_common.
405fe6060f1SDimitry Andric   SetCheckUnwindCallback(CheckUnwind);
40668d75effSDimitry Andric 
40768d75effSDimitry Andric   ctx = new(ctx_placeholder) Context;
40868d75effSDimitry Andric   const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
40968d75effSDimitry Andric   const char *options = GetEnv(env_name);
41068d75effSDimitry Andric   CacheBinaryName();
41168d75effSDimitry Andric   CheckASLR();
41268d75effSDimitry Andric   InitializeFlags(&ctx->flags, options, env_name);
41368d75effSDimitry Andric   AvoidCVE_2016_2143();
41468d75effSDimitry Andric   __sanitizer::InitializePlatformEarly();
41568d75effSDimitry Andric   __tsan::InitializePlatformEarly();
41668d75effSDimitry Andric 
41768d75effSDimitry Andric #if !SANITIZER_GO
41868d75effSDimitry Andric   // Re-exec ourselves if we need to set additional env or command line args.
41968d75effSDimitry Andric   MaybeReexec();
42068d75effSDimitry Andric 
42168d75effSDimitry Andric   InitializeAllocator();
42268d75effSDimitry Andric   ReplaceSystemMalloc();
42368d75effSDimitry Andric #endif
42468d75effSDimitry Andric   if (common_flags()->detect_deadlocks)
42568d75effSDimitry Andric     ctx->dd = DDetector::Create(flags());
42668d75effSDimitry Andric   Processor *proc = ProcCreate();
42768d75effSDimitry Andric   ProcWire(proc, thr);
42868d75effSDimitry Andric   InitializeInterceptors();
42968d75effSDimitry Andric   InitializePlatform();
43068d75effSDimitry Andric   InitializeDynamicAnnotations();
43168d75effSDimitry Andric #if !SANITIZER_GO
43268d75effSDimitry Andric   InitializeShadowMemory();
43368d75effSDimitry Andric   InitializeAllocatorLate();
43468d75effSDimitry Andric   InstallDeadlySignalHandlers(TsanOnDeadlySignal);
43568d75effSDimitry Andric #endif
43668d75effSDimitry Andric   // Set up the correct file descriptor for error reports.
43768d75effSDimitry Andric   __sanitizer_set_report_path(common_flags()->log_path);
43868d75effSDimitry Andric   InitializeSuppressions();
43968d75effSDimitry Andric #if !SANITIZER_GO
44068d75effSDimitry Andric   InitializeLibIgnore();
44168d75effSDimitry Andric   Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
44268d75effSDimitry Andric #endif
44368d75effSDimitry Andric 
44468d75effSDimitry Andric   VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
44568d75effSDimitry Andric           (int)internal_getpid());
44668d75effSDimitry Andric 
44768d75effSDimitry Andric   // Initialize thread 0.
448349cc55cSDimitry Andric   Tid tid = ThreadCreate(thr, 0, 0, true);
449349cc55cSDimitry Andric   CHECK_EQ(tid, kMainTid);
45068d75effSDimitry Andric   ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
45168d75effSDimitry Andric #if TSAN_CONTAINS_UBSAN
45268d75effSDimitry Andric   __ubsan::InitAsPlugin();
45368d75effSDimitry Andric #endif
45468d75effSDimitry Andric   ctx->initialized = true;
45568d75effSDimitry Andric 
45668d75effSDimitry Andric #if !SANITIZER_GO
45768d75effSDimitry Andric   Symbolizer::LateInitialize();
458349cc55cSDimitry Andric   InitializeMemoryProfiler();
45968d75effSDimitry Andric #endif
46068d75effSDimitry Andric 
46168d75effSDimitry Andric   if (flags()->stop_on_start) {
46268d75effSDimitry Andric     Printf("ThreadSanitizer is suspended at startup (pid %d)."
46368d75effSDimitry Andric            " Call __tsan_resume().\n",
46468d75effSDimitry Andric            (int)internal_getpid());
46568d75effSDimitry Andric     while (__tsan_resumed == 0) {}
46668d75effSDimitry Andric   }
46768d75effSDimitry Andric 
46868d75effSDimitry Andric   OnInitialize();
46968d75effSDimitry Andric }
47068d75effSDimitry Andric 
47168d75effSDimitry Andric void MaybeSpawnBackgroundThread() {
47268d75effSDimitry Andric   // On MIPS, TSan initialization is run before
47368d75effSDimitry Andric   // __pthread_initialize_minimal_internal() is finished, so we cannot spawn
47468d75effSDimitry Andric   // new threads.
47568d75effSDimitry Andric #if !SANITIZER_GO && !defined(__mips__)
47668d75effSDimitry Andric   static atomic_uint32_t bg_thread = {};
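  // The relaxed load is just a fast-path check; the atomic exchange ensures
  // that only one caller actually starts the background thread.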
47768d75effSDimitry Andric   if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
47868d75effSDimitry Andric       atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
47968d75effSDimitry Andric     StartBackgroundThread();
48068d75effSDimitry Andric     SetSandboxingCallback(StopBackgroundThread);
48168d75effSDimitry Andric   }
48268d75effSDimitry Andric #endif
48368d75effSDimitry Andric }
48468d75effSDimitry Andric 
48568d75effSDimitry Andric 
48668d75effSDimitry Andric int Finalize(ThreadState *thr) {
48768d75effSDimitry Andric   bool failed = false;
48868d75effSDimitry Andric 
489e8d8bef9SDimitry Andric   if (common_flags()->print_module_map == 1)
490e8d8bef9SDimitry Andric     DumpProcessMap();
49168d75effSDimitry Andric 
49268d75effSDimitry Andric   if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
49368d75effSDimitry Andric     SleepForMillis(flags()->atexit_sleep_ms);
49468d75effSDimitry Andric 
49568d75effSDimitry Andric   // Wait for pending reports.
49668d75effSDimitry Andric   ctx->report_mtx.Lock();
49768d75effSDimitry Andric   { ScopedErrorReportLock l; }
49868d75effSDimitry Andric   ctx->report_mtx.Unlock();
49968d75effSDimitry Andric 
50068d75effSDimitry Andric #if !SANITIZER_GO
50168d75effSDimitry Andric   if (Verbosity()) AllocatorPrintStats();
50268d75effSDimitry Andric #endif
50368d75effSDimitry Andric 
50468d75effSDimitry Andric   ThreadFinalize(thr);
50568d75effSDimitry Andric 
50668d75effSDimitry Andric   if (ctx->nreported) {
50768d75effSDimitry Andric     failed = true;
50868d75effSDimitry Andric #if !SANITIZER_GO
50968d75effSDimitry Andric     Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
51068d75effSDimitry Andric #else
51168d75effSDimitry Andric     Printf("Found %d data race(s)\n", ctx->nreported);
51268d75effSDimitry Andric #endif
51368d75effSDimitry Andric   }
51468d75effSDimitry Andric 
51568d75effSDimitry Andric   if (common_flags()->print_suppressions)
51668d75effSDimitry Andric     PrintMatchedSuppressions();
51768d75effSDimitry Andric 
51868d75effSDimitry Andric   failed = OnFinalize(failed);
51968d75effSDimitry Andric 
52068d75effSDimitry Andric   return failed ? common_flags()->exitcode : 0;
52168d75effSDimitry Andric }
52268d75effSDimitry Andric 
52368d75effSDimitry Andric #if !SANITIZER_GO
524fe6060f1SDimitry Andric void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
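  // Acquire the runtime mutexes before fork so that the child, which inherits
  // only the calling thread, never sees them held by a thread that no longer
  // exists; they are released in ForkParentAfter/ForkChildAfter.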
525349cc55cSDimitry Andric   ctx->thread_registry.Lock();
52668d75effSDimitry Andric   ctx->report_mtx.Lock();
527fe6060f1SDimitry Andric   ScopedErrorReportLock::Lock();
528*4824e7fdSDimitry Andric   AllocatorLock();
529fe6060f1SDimitry Andric   // Suppress all reports in the pthread_atfork callbacks.
530fe6060f1SDimitry Andric   // Reports will deadlock on the report_mtx.
531fe6060f1SDimitry Andric   // We could ignore sync operations as well,
5325ffd83dbSDimitry Andric   // but so far it's unclear if it will do more good or harm.
5335ffd83dbSDimitry Andric   // Unnecessarily ignoring things can lead to false positives later.
534fe6060f1SDimitry Andric   thr->suppress_reports++;
535fe6060f1SDimitry Andric   // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
536fe6060f1SDimitry Andric   // we'll assert in CheckNoLocks() unless we ignore interceptors.
537*4824e7fdSDimitry Andric   // On OS X, the libSystem_atfork_prepare/parent/child callbacks are called
538*4824e7fdSDimitry Andric   // after/before our callbacks, and they call free.
539fe6060f1SDimitry Andric   thr->ignore_interceptors++;
540*4824e7fdSDimitry Andric   // Disable memory writes in OnUserAlloc/Free.
541*4824e7fdSDimitry Andric   thr->ignore_reads_and_writes++;
542*4824e7fdSDimitry Andric 
543*4824e7fdSDimitry Andric   __tsan_test_only_on_fork();
54468d75effSDimitry Andric }
54568d75effSDimitry Andric 
546fe6060f1SDimitry Andric void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
547fe6060f1SDimitry Andric   thr->suppress_reports--;  // Enabled in ForkBefore.
548fe6060f1SDimitry Andric   thr->ignore_interceptors--;
549*4824e7fdSDimitry Andric   thr->ignore_reads_and_writes--;
550*4824e7fdSDimitry Andric   AllocatorUnlock();
551fe6060f1SDimitry Andric   ScopedErrorReportLock::Unlock();
55268d75effSDimitry Andric   ctx->report_mtx.Unlock();
553349cc55cSDimitry Andric   ctx->thread_registry.Unlock();
55468d75effSDimitry Andric }
55568d75effSDimitry Andric 
556349cc55cSDimitry Andric void ForkChildAfter(ThreadState *thr, uptr pc,
557349cc55cSDimitry Andric                     bool start_thread) NO_THREAD_SAFETY_ANALYSIS {
558fe6060f1SDimitry Andric   thr->suppress_reports--;  // Enabled in ForkBefore.
559fe6060f1SDimitry Andric   thr->ignore_interceptors--;
560*4824e7fdSDimitry Andric   thr->ignore_reads_and_writes--;
561*4824e7fdSDimitry Andric   AllocatorUnlock();
562fe6060f1SDimitry Andric   ScopedErrorReportLock::Unlock();
56368d75effSDimitry Andric   ctx->report_mtx.Unlock();
564349cc55cSDimitry Andric   ctx->thread_registry.Unlock();
56568d75effSDimitry Andric 
56668d75effSDimitry Andric   uptr nthread = 0;
567349cc55cSDimitry Andric   ctx->thread_registry.GetNumberOfThreads(0, 0, &nthread /* alive threads */);
56868d75effSDimitry Andric   VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
56968d75effSDimitry Andric       " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
57068d75effSDimitry Andric   if (nthread == 1) {
571349cc55cSDimitry Andric     if (start_thread)
57268d75effSDimitry Andric       StartBackgroundThread();
57368d75effSDimitry Andric   } else {
57468d75effSDimitry Andric     // We've just forked a multi-threaded process. We cannot reasonably function
57568d75effSDimitry Andric     // after that (some mutexes may be locked before fork). So just enable
57668d75effSDimitry Andric     // ignores for everything in the hope that we will exec soon.
57768d75effSDimitry Andric     ctx->after_multithreaded_fork = true;
57868d75effSDimitry Andric     thr->ignore_interceptors++;
57968d75effSDimitry Andric     ThreadIgnoreBegin(thr, pc);
58068d75effSDimitry Andric     ThreadIgnoreSyncBegin(thr, pc);
58168d75effSDimitry Andric   }
58268d75effSDimitry Andric }
58368d75effSDimitry Andric #endif
58468d75effSDimitry Andric 
58568d75effSDimitry Andric #if SANITIZER_GO
58668d75effSDimitry Andric NOINLINE
58768d75effSDimitry Andric void GrowShadowStack(ThreadState *thr) {
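  // The Go shadow stack is dynamically allocated and grows by doubling:
  // allocate a buffer twice the current size, copy the existing frames,
  // then free the old buffer.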
58868d75effSDimitry Andric   const int sz = thr->shadow_stack_end - thr->shadow_stack;
58968d75effSDimitry Andric   const int newsz = 2 * sz;
590349cc55cSDimitry Andric   auto *newstack = (uptr *)Alloc(newsz * sizeof(uptr));
59168d75effSDimitry Andric   internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
592349cc55cSDimitry Andric   Free(thr->shadow_stack);
59368d75effSDimitry Andric   thr->shadow_stack = newstack;
59468d75effSDimitry Andric   thr->shadow_stack_pos = newstack + sz;
59568d75effSDimitry Andric   thr->shadow_stack_end = newstack + newsz;
59668d75effSDimitry Andric }
59768d75effSDimitry Andric #endif
59868d75effSDimitry Andric 
599349cc55cSDimitry Andric StackID CurrentStackId(ThreadState *thr, uptr pc) {
60068d75effSDimitry Andric   if (!thr->is_inited)  // May happen during bootstrap.
601349cc55cSDimitry Andric     return kInvalidStackID;
60268d75effSDimitry Andric   if (pc != 0) {
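    // Temporarily push pc so it is included in the stack stored in the depot;
    // it is popped again after StackDepotPut below.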
60368d75effSDimitry Andric #if !SANITIZER_GO
60468d75effSDimitry Andric     DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
60568d75effSDimitry Andric #else
60668d75effSDimitry Andric     if (thr->shadow_stack_pos == thr->shadow_stack_end)
60768d75effSDimitry Andric       GrowShadowStack(thr);
60868d75effSDimitry Andric #endif
60968d75effSDimitry Andric     thr->shadow_stack_pos[0] = pc;
61068d75effSDimitry Andric     thr->shadow_stack_pos++;
61168d75effSDimitry Andric   }
612349cc55cSDimitry Andric   StackID id = StackDepotPut(
61368d75effSDimitry Andric       StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
61468d75effSDimitry Andric   if (pc != 0)
61568d75effSDimitry Andric     thr->shadow_stack_pos--;
61668d75effSDimitry Andric   return id;
61768d75effSDimitry Andric }
61868d75effSDimitry Andric 
619349cc55cSDimitry Andric namespace v3 {
620349cc55cSDimitry Andric 
621349cc55cSDimitry Andric NOINLINE
622349cc55cSDimitry Andric void TraceSwitchPart(ThreadState *thr) {
623349cc55cSDimitry Andric   Trace *trace = &thr->tctx->trace;
624349cc55cSDimitry Andric   Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
625349cc55cSDimitry Andric   DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
626349cc55cSDimitry Andric   auto *part = trace->parts.Back();
627349cc55cSDimitry Andric   DPrintf("TraceSwitchPart part=%p pos=%p\n", part, pos);
628349cc55cSDimitry Andric   if (part) {
629349cc55cSDimitry Andric     // We can get here when we still have space in the current trace part.
630349cc55cSDimitry Andric     // The fast-path check in TraceAcquire has false positives in the middle of
631349cc55cSDimitry Andric     // the part. Check if we are indeed at the end of the current part or not,
632349cc55cSDimitry Andric     // and fill any gaps with NopEvent's.
633349cc55cSDimitry Andric     // and fill any gaps with NopEvents.
634349cc55cSDimitry Andric     DCHECK_GE(pos, &part->events[0]);
635349cc55cSDimitry Andric     DCHECK_LE(pos, end);
636349cc55cSDimitry Andric     if (pos + 1 < end) {
637349cc55cSDimitry Andric       if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
638349cc55cSDimitry Andric           TracePart::kAlignment)
639349cc55cSDimitry Andric         *pos++ = NopEvent;
640349cc55cSDimitry Andric       *pos++ = NopEvent;
641349cc55cSDimitry Andric       DCHECK_LE(pos + 2, end);
642349cc55cSDimitry Andric       atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
643349cc55cSDimitry Andric       // Ensure we set up the trace so that the next TraceAcquire
644349cc55cSDimitry Andric       // won't detect the trace part end.
645349cc55cSDimitry Andric       Event *ev;
646349cc55cSDimitry Andric       CHECK(TraceAcquire(thr, &ev));
647349cc55cSDimitry Andric       return;
648349cc55cSDimitry Andric     }
649349cc55cSDimitry Andric     // We are indeed at the end.
650349cc55cSDimitry Andric     for (; pos < end; pos++) *pos = NopEvent;
651349cc55cSDimitry Andric   }
652349cc55cSDimitry Andric #if !SANITIZER_GO
653349cc55cSDimitry Andric   if (ctx->after_multithreaded_fork) {
654349cc55cSDimitry Andric     // We just need to survive till exec.
655349cc55cSDimitry Andric     CHECK(part);
656349cc55cSDimitry Andric     atomic_store_relaxed(&thr->trace_pos,
657349cc55cSDimitry Andric                          reinterpret_cast<uptr>(&part->events[0]));
658349cc55cSDimitry Andric     return;
659349cc55cSDimitry Andric   }
660349cc55cSDimitry Andric #endif
661349cc55cSDimitry Andric   part = new (MmapOrDie(sizeof(TracePart), "TracePart")) TracePart();
662349cc55cSDimitry Andric   part->trace = trace;
663349cc55cSDimitry Andric   thr->trace_prev_pc = 0;
664349cc55cSDimitry Andric   {
665349cc55cSDimitry Andric     Lock lock(&trace->mtx);
666349cc55cSDimitry Andric     trace->parts.PushBack(part);
667349cc55cSDimitry Andric     atomic_store_relaxed(&thr->trace_pos,
668349cc55cSDimitry Andric                          reinterpret_cast<uptr>(&part->events[0]));
669349cc55cSDimitry Andric   }
670349cc55cSDimitry Andric   // Make this part self-sufficient by restoring the current stack
671349cc55cSDimitry Andric   // and mutex set at the beginning of the trace.
672349cc55cSDimitry Andric   TraceTime(thr);
673349cc55cSDimitry Andric   for (uptr *pos = &thr->shadow_stack[0]; pos < thr->shadow_stack_pos; pos++)
674349cc55cSDimitry Andric     CHECK(TryTraceFunc(thr, *pos));
675349cc55cSDimitry Andric   for (uptr i = 0; i < thr->mset.Size(); i++) {
676349cc55cSDimitry Andric     MutexSet::Desc d = thr->mset.Get(i);
677349cc55cSDimitry Andric     TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
678349cc55cSDimitry Andric                    d.addr, d.stack_id);
679349cc55cSDimitry Andric   }
680349cc55cSDimitry Andric }
681349cc55cSDimitry Andric 
682349cc55cSDimitry Andric }  // namespace v3
683349cc55cSDimitry Andric 
68468d75effSDimitry Andric void TraceSwitch(ThreadState *thr) {
68568d75effSDimitry Andric #if !SANITIZER_GO
68668d75effSDimitry Andric   if (ctx->after_multithreaded_fork)
68768d75effSDimitry Andric     return;
68868d75effSDimitry Andric #endif
68968d75effSDimitry Andric   thr->nomalloc++;
69068d75effSDimitry Andric   Trace *thr_trace = ThreadTrace(thr->tid);
69168d75effSDimitry Andric   Lock l(&thr_trace->mtx);
69268d75effSDimitry Andric   unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
69368d75effSDimitry Andric   TraceHeader *hdr = &thr_trace->headers[trace];
69468d75effSDimitry Andric   hdr->epoch0 = thr->fast_state.epoch();
69568d75effSDimitry Andric   ObtainCurrentStack(thr, 0, &hdr->stack0);
69668d75effSDimitry Andric   hdr->mset0 = thr->mset;
69768d75effSDimitry Andric   thr->nomalloc--;
69868d75effSDimitry Andric }
69968d75effSDimitry Andric 
700349cc55cSDimitry Andric Trace *ThreadTrace(Tid tid) { return (Trace *)GetThreadTraceHeader(tid); }
70168d75effSDimitry Andric 
70268d75effSDimitry Andric uptr TraceTopPC(ThreadState *thr) {
70368d75effSDimitry Andric   Event *events = (Event*)GetThreadTrace(thr->tid);
70468d75effSDimitry Andric   uptr pc = events[thr->fast_state.GetTracePos()];
70568d75effSDimitry Andric   return pc;
70668d75effSDimitry Andric }
70768d75effSDimitry Andric 
70868d75effSDimitry Andric uptr TraceSize() {
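  // The per-thread trace holds 2^(kTracePartSizeBits + history_size + 1)
  // events, so each increment of the history_size flag doubles its size.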
70968d75effSDimitry Andric   return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
71068d75effSDimitry Andric }
71168d75effSDimitry Andric 
71268d75effSDimitry Andric uptr TraceParts() {
71368d75effSDimitry Andric   return TraceSize() / kTracePartSize;
71468d75effSDimitry Andric }
71568d75effSDimitry Andric 
71668d75effSDimitry Andric #if !SANITIZER_GO
71768d75effSDimitry Andric extern "C" void __tsan_trace_switch() {
71868d75effSDimitry Andric   TraceSwitch(cur_thread());
71968d75effSDimitry Andric }
72068d75effSDimitry Andric 
72168d75effSDimitry Andric extern "C" void __tsan_report_race() {
72268d75effSDimitry Andric   ReportRace(cur_thread());
72368d75effSDimitry Andric }
72468d75effSDimitry Andric #endif
72568d75effSDimitry Andric 
726349cc55cSDimitry Andric void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
72768d75effSDimitry Andric   DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
72868d75effSDimitry Andric   thr->ignore_reads_and_writes++;
72968d75effSDimitry Andric   CHECK_GT(thr->ignore_reads_and_writes, 0);
73068d75effSDimitry Andric   thr->fast_state.SetIgnoreBit();
73168d75effSDimitry Andric #if !SANITIZER_GO
732349cc55cSDimitry Andric   if (pc && !ctx->after_multithreaded_fork)
73368d75effSDimitry Andric     thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
73468d75effSDimitry Andric #endif
73568d75effSDimitry Andric }
73668d75effSDimitry Andric 
737349cc55cSDimitry Andric void ThreadIgnoreEnd(ThreadState *thr) {
73868d75effSDimitry Andric   DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
73968d75effSDimitry Andric   CHECK_GT(thr->ignore_reads_and_writes, 0);
74068d75effSDimitry Andric   thr->ignore_reads_and_writes--;
74168d75effSDimitry Andric   if (thr->ignore_reads_and_writes == 0) {
74268d75effSDimitry Andric     thr->fast_state.ClearIgnoreBit();
74368d75effSDimitry Andric #if !SANITIZER_GO
74468d75effSDimitry Andric     thr->mop_ignore_set.Reset();
74568d75effSDimitry Andric #endif
74668d75effSDimitry Andric   }
74768d75effSDimitry Andric }
74868d75effSDimitry Andric 
74968d75effSDimitry Andric #if !SANITIZER_GO
75068d75effSDimitry Andric extern "C" SANITIZER_INTERFACE_ATTRIBUTE
75168d75effSDimitry Andric uptr __tsan_testonly_shadow_stack_current_size() {
75268d75effSDimitry Andric   ThreadState *thr = cur_thread();
75368d75effSDimitry Andric   return thr->shadow_stack_pos - thr->shadow_stack;
75468d75effSDimitry Andric }
75568d75effSDimitry Andric #endif
75668d75effSDimitry Andric 
757349cc55cSDimitry Andric void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
75868d75effSDimitry Andric   DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
75968d75effSDimitry Andric   thr->ignore_sync++;
76068d75effSDimitry Andric   CHECK_GT(thr->ignore_sync, 0);
76168d75effSDimitry Andric #if !SANITIZER_GO
762349cc55cSDimitry Andric   if (pc && !ctx->after_multithreaded_fork)
76368d75effSDimitry Andric     thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
76468d75effSDimitry Andric #endif
76568d75effSDimitry Andric }
76668d75effSDimitry Andric 
767349cc55cSDimitry Andric void ThreadIgnoreSyncEnd(ThreadState *thr) {
76868d75effSDimitry Andric   DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
76968d75effSDimitry Andric   CHECK_GT(thr->ignore_sync, 0);
77068d75effSDimitry Andric   thr->ignore_sync--;
77168d75effSDimitry Andric #if !SANITIZER_GO
77268d75effSDimitry Andric   if (thr->ignore_sync == 0)
77368d75effSDimitry Andric     thr->sync_ignore_set.Reset();
77468d75effSDimitry Andric #endif
77568d75effSDimitry Andric }
77668d75effSDimitry Andric 
77768d75effSDimitry Andric bool MD5Hash::operator==(const MD5Hash &other) const {
77868d75effSDimitry Andric   return hash[0] == other.hash[0] && hash[1] == other.hash[1];
77968d75effSDimitry Andric }
78068d75effSDimitry Andric 
78168d75effSDimitry Andric #if SANITIZER_DEBUG
78268d75effSDimitry Andric void build_consistency_debug() {}
78368d75effSDimitry Andric #else
78468d75effSDimitry Andric void build_consistency_release() {}
78568d75effSDimitry Andric #endif
78668d75effSDimitry Andric 
78768d75effSDimitry Andric }  // namespace __tsan
78868d75effSDimitry Andric 
789fe6060f1SDimitry Andric #if SANITIZER_CHECK_DEADLOCKS
790fe6060f1SDimitry Andric namespace __sanitizer {
791fe6060f1SDimitry Andric using namespace __tsan;
792fe6060f1SDimitry Andric MutexMeta mutex_meta[] = {
793fe6060f1SDimitry Andric     {MutexInvalid, "Invalid", {}},
794fe6060f1SDimitry Andric     {MutexThreadRegistry, "ThreadRegistry", {}},
795*4824e7fdSDimitry Andric     {MutexTypeTrace, "Trace", {}},
796*4824e7fdSDimitry Andric     {MutexTypeReport,
797*4824e7fdSDimitry Andric      "Report",
798*4824e7fdSDimitry Andric      {MutexTypeSyncVar, MutexTypeGlobalProc, MutexTypeTrace}},
799*4824e7fdSDimitry Andric     {MutexTypeSyncVar, "SyncVar", {MutexTypeTrace}},
800fe6060f1SDimitry Andric     {MutexTypeAnnotations, "Annotations", {}},
801fe6060f1SDimitry Andric     {MutexTypeAtExit, "AtExit", {MutexTypeSyncVar}},
802fe6060f1SDimitry Andric     {MutexTypeFired, "Fired", {MutexLeaf}},
803fe6060f1SDimitry Andric     {MutexTypeRacy, "Racy", {MutexLeaf}},
804fe6060f1SDimitry Andric     {MutexTypeGlobalProc, "GlobalProc", {}},
805*4824e7fdSDimitry Andric     {MutexTypeInternalAlloc, "InternalAlloc", {MutexLeaf}},
806fe6060f1SDimitry Andric     {},
807fe6060f1SDimitry Andric };
808fe6060f1SDimitry Andric 
809fe6060f1SDimitry Andric void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }
810fe6060f1SDimitry Andric }  // namespace __sanitizer
811fe6060f1SDimitry Andric #endif
812