//===-- tsan_rtl.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "tsan_rtl.h"

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_interface.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

namespace __tsan {

#if !SANITIZER_GO
void (*on_initialize)(void);
int (*on_finalize)(int);
#endif

// XXX PR lib/58349 (https://gnats.NetBSD.org/58349): NetBSD ld.elf_so
// doesn't support TLS alignment beyond void *, so we have to buffer
// some extra space and do the alignment ourselves at all the reference
// sites.
#if !SANITIZER_GO && !SANITIZER_MAC
__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState) +
                                        SANITIZER_CACHE_LINE_SIZE - 1]
    ALIGNED(SANITIZER_CACHE_LINE_SIZE);
#endif
static char ctx_placeholder[sizeof(Context) + SANITIZER_CACHE_LINE_SIZE - 1]
    ALIGNED(SANITIZER_CACHE_LINE_SIZE);
Context *ctx;
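
// A reference site then rounds the placeholder address up to the cache-line
// boundary by hand; roughly (the actual code is the placement-new of `ctx`
// in Initialize() below):
//
//   uptr p = reinterpret_cast<uptr>(ctx_placeholder);
//   p = (p + SANITIZER_CACHE_LINE_SIZE - 1) &
//       ~static_cast<uptr>(SANITIZER_CACHE_LINE_SIZE - 1);
//   Context *aligned_ctx = new (reinterpret_cast<char *>(p)) Context;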

// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
#include <dlfcn.h>
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
#if !SANITIZER_GO
  if (on_finalize)
    return on_finalize(failed);
#endif
  return failed;
}
SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {
#if !SANITIZER_GO
  if (on_initialize)
    on_initialize();
#endif
}
#endif

static ThreadContextBase *CreateThreadContext(Tid tid) {
  // Map thread trace when context is created.
  char name[50];
  internal_snprintf(name, sizeof(name), "trace %u", tid);
  MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
  const uptr hdr = GetThreadTraceHeader(tid);
  internal_snprintf(name, sizeof(name), "trace header %u", tid);
  MapThreadTrace(hdr, sizeof(Trace), name);
  new ((void *)hdr) Trace();
  // We are going to use only a small part of the trace with the default
  // value of history_size. However, the constructor writes to the whole trace.
  // Release the unused part.
  uptr hdr_end = hdr + sizeof(Trace);
  hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
  hdr_end = RoundUp(hdr_end, GetPageSizeCached());
  if (hdr_end < hdr + sizeof(Trace)) {
    ReleaseMemoryPagesToOS(hdr_end, hdr + sizeof(Trace));
    uptr unused = hdr + sizeof(Trace) - hdr_end;
    if (hdr_end != (uptr)MmapFixedNoAccess(hdr_end, unused)) {
      Report("ThreadSanitizer: failed to mprotect [0x%zx-0x%zx)\n", hdr_end,
             hdr_end + unused);
      CHECK("unable to mprotect" && 0);
    }
  }
  return New<ThreadContext>(tid);
}

#if !SANITIZER_GO
static const u32 kThreadQuarantineSize = 16;
#else
static const u32 kThreadQuarantineSize = 64;
#endif

Context::Context()
    : initialized(),
      report_mtx(MutexTypeReport),
      nreported(),
      thread_registry(CreateThreadContext, kMaxTid, kThreadQuarantineSize,
                      kMaxTidReuse),
      racy_mtx(MutexTypeRacy),
      racy_stacks(),
      racy_addresses(),
      fired_suppressions_mtx(MutexTypeFired),
      clock_alloc(LINKER_INITIALIZED, "clock allocator") {
  fired_suppressions.reserve(8);
}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Context *ctx, Tid tid, int unique_id, u64 epoch,
                         unsigned reuse_count, uptr stk_addr, uptr stk_size,
                         uptr tls_addr, uptr tls_size)
    : fast_state(tid, epoch)
      // Do not touch these, rely on zero initialization,
      // they may be accessed before the ctor.
      // , ignore_reads_and_writes()
      // , ignore_interceptors()
      ,
      clock(tid, reuse_count)
#if !SANITIZER_GO
      ,
      jmp_bufs()
#endif
      ,
      tid(tid),
      unique_id(unique_id),
      stk_addr(stk_addr),
      stk_size(stk_size),
      tls_addr(tls_addr),
      tls_size(tls_size)
#if !SANITIZER_GO
      ,
      last_sleep_clock(tid)
#endif
{
  CHECK_EQ(reinterpret_cast<uptr>(this) % SANITIZER_CACHE_LINE_SIZE, 0);
#if !SANITIZER_GO
  shadow_stack_pos = shadow_stack;
  shadow_stack_end = shadow_stack + kShadowStackSize;
#else
  // Set up the dynamic shadow stack.
  const int kInitStackSize = 8;
  shadow_stack = (uptr *)Alloc(kInitStackSize * sizeof(uptr));
  shadow_stack_pos = shadow_stack;
  shadow_stack_end = shadow_stack + kInitStackSize;
#endif
}

#if !SANITIZER_GO
void MemoryProfiler(u64 uptime) {
  if (ctx->memprof_fd == kInvalidFd)
    return;
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), uptime);
  WriteToFile(ctx->memprof_fd, buf.data(), internal_strlen(buf.data()));
}

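// Usage sketch (assuming the usual TSAN_OPTIONS flag parsing): running with
//   TSAN_OPTIONS=profile_memory=stdout ./app
// makes the function below write periodic memory profiles to stdout;
// "stderr" works analogously, and any other value is treated as a file name
// prefix with the pid appended.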
void InitializeMemoryProfiler() {
  ctx->memprof_fd = kInvalidFd;
  const char *fname = flags()->profile_memory;
  if (!fname || !fname[0])
    return;
  if (internal_strcmp(fname, "stdout") == 0) {
    ctx->memprof_fd = 1;
  } else if (internal_strcmp(fname, "stderr") == 0) {
    ctx->memprof_fd = 2;
  } else {
    InternalScopedString filename;
    filename.append("%s.%d", fname, (int)internal_getpid());
    ctx->memprof_fd = OpenFile(filename.data(), WrOnly);
    if (ctx->memprof_fd == kInvalidFd) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
             filename.data());
      return;
    }
  }
  MemoryProfiler(0);
  MaybeSpawnBackgroundThread();
}

static void *BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread_init()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;
  const u64 start = NanoTime();

  u64 last_flush = NanoTime();
  uptr last_rss = 0;
  for (int i = 0;
       atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
       i++) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        last_flush = NanoTime();
      }
    }
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VPrintf(1, "ThreadSanitizer: memory flush check"
                 " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
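      // The condition below is equivalent to rss - last_rss > limit - rss,
      // i.e. flush when the RSS growth since the previous check exceeds the
      // remaining headroom to the configured limit.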
      if (2 * rss > limit + last_rss) {
        VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n",
                (u64)rss >> 20);
      }
      last_rss = rss;
    }

    MemoryProfiler(now - start);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
  return nullptr;
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef __mips__
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
#endif

void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(reinterpret_cast<uptr>(MemToShadow(addr)),
                         reinterpret_cast<uptr>(MemToShadow(addr + size)));
}

#if !SANITIZER_GO
void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
  if (size == 0) return;
  DontNeedShadowFor(addr, size);
  ScopedGlobalProcessor sgp;
  ctx->metamap.ResetRange(thr->proc(), addr, size);
}
#endif

void MapShadow(uptr addr, uptr size) {
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                               "shadow"))
    Die();

  // Meta shadow is 2:1, so tread carefully.
  static bool data_mapped = false;
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
  } else {
    // Mapping the contiguous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    if (meta_end <= mapped_meta_end)
      return;
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (0x%zx-0x%zx) at (0x%zx-0x%zx)\n", addr,
          addr + size, meta_begin, meta_end);
}

void MapThreadTrace(uptr addr, uptr size, const char *name) {
  DPrintf("#0: Mapping trace at 0x%zx-0x%zx(0x%zx)\n", addr, addr + size, size);
  CHECK_GE(addr, TraceMemBeg());
  CHECK_LE(addr + size, TraceMemEnd());
  CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  if (!MmapFixedSuperNoReserve(addr, size, name)) {
    Printf("FATAL: ThreadSanitizer can not mmap thread trace (0x%zx/0x%zx)\n",
           addr, size);
    Die();
  }
}

#if !SANITIZER_GO
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
#endif

void CheckUnwind() {
  // There is a high probability that interceptors will check-fail as well;
  // on the other hand, there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  cur_thread()->ignore_sync++;
  cur_thread()->ignore_reads_and_writes++;
#endif
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}

bool is_initialized;

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckUnwindCallback(CheckUnwind);

  ctx = new (reinterpret_cast<char *>(
      (reinterpret_cast<uptr>(ctx_placeholder) + SANITIZER_CACHE_LINE_SIZE -
       1) &
      ~static_cast<uptr>(SANITIZER_CACHE_LINE_SIZE - 1))) Context;
  const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
  const char *options = GetEnv(env_name);
  CacheBinaryName();
  CheckASLR();
  InitializeFlags(&ctx->flags, options, env_name);
  AvoidCVE_2016_2143();
  __sanitizer::InitializePlatformEarly();
  __tsan::InitializePlatformEarly();

#if !SANITIZER_GO
  // Re-exec ourselves if we need to set additional env or command line args.
  MaybeReexec();

  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  InitializePlatform();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Set up the correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  Tid tid = ThreadCreate(thr, 0, 0, true);
  CHECK_EQ(tid, kMainTid);
  ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif
  ctx->initialized = true;

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
  InitializeMemoryProfiler();
#endif

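  // Note: with stop_on_start=1 in TSAN_OPTIONS the process spins in the loop
  // below until __tsan_resume() (defined at the top of this file) is called,
  // e.g. from an attached debugger: (gdb) call __tsan_resume()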
  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}

void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we cannot spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
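  // The relaxed load is a cheap fast path for the common already-spawned case;
  // the exchange guarantees that only one caller actually starts the thread.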
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}

int Finalize(ThreadState *thr) {
  bool failed = false;

  if (common_flags()->print_module_map == 1)
    DumpProcessMap();

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    SleepForMillis(flags()->atexit_sleep_ms);

  // Wait for pending reports.
  ctx->report_mtx.Lock();
  { ScopedErrorReportLock l; }
  ctx->report_mtx.Unlock();

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();

  failed = OnFinalize(failed);

  return failed ? common_flags()->exitcode : 0;
}

#if !SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
  ctx->thread_registry.Lock();
  ctx->report_mtx.Lock();
  ScopedErrorReportLock::Lock();
  // Suppress all reports in the pthread_atfork callbacks.
  // Reports will deadlock on the report_mtx.
  // We could ignore sync operations as well,
  // but so far it's unclear if it will do more good or harm.
  // Unnecessarily ignoring things can lead to false positives later.
  thr->suppress_reports++;
  // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
  // we'll assert in CheckNoLocks() unless we ignore interceptors.
  thr->ignore_interceptors++;
}

void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
  thr->ignore_interceptors--;
  ScopedErrorReportLock::Unlock();
  ctx->report_mtx.Unlock();
  ctx->thread_registry.Unlock();
}

void ForkChildAfter(ThreadState *thr, uptr pc,
                    bool start_thread) NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
  thr->ignore_interceptors--;
  ScopedErrorReportLock::Unlock();
  ctx->report_mtx.Unlock();
  ctx->thread_registry.Unlock();

  uptr nthread = 0;
  ctx->thread_registry.GetNumberOfThreads(0, 0, &nthread /* alive threads */);
  VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
             " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    if (start_thread)
      StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably
    // function after that (some mutexes may be locked before fork). So just
    // enable ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif

#if SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  auto *newstack = (uptr *)Alloc(newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  Free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

StackID CurrentStackId(ThreadState *thr, uptr pc) {
  if (!thr->is_inited)  // May happen during bootstrap.
    return kInvalidStackID;
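  // pc (if provided) is pushed temporarily so the collected stack includes
  // the caller's pc; it is popped again right after StackDepotPut below.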
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  StackID id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}

namespace v3 {

NOINLINE
void TraceSwitchPart(ThreadState *thr) {
  Trace *trace = &thr->tctx->trace;
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
  DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
  auto *part = trace->parts.Back();
  DPrintf("TraceSwitchPart part=%p pos=%p\n", part, pos);
  if (part) {
    // We can get here when we still have space in the current trace part.
    // The fast-path check in TraceAcquire has false positives in the middle of
    // the part. Check if we are indeed at the end of the current part or not,
    // and fill any gaps with NopEvents.
    Event *end = &part->events[TracePart::kSize];
    DCHECK_GE(pos, &part->events[0]);
    DCHECK_LE(pos, end);
    if (pos + 1 < end) {
      if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
          TracePart::kAlignment)
        *pos++ = NopEvent;
      *pos++ = NopEvent;
      DCHECK_LE(pos + 2, end);
      atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
      // Ensure we set up the trace so that the next TraceAcquire
      // won't detect trace part end.
      Event *ev;
      CHECK(TraceAcquire(thr, &ev));
      return;
    }
    // We are indeed at the end.
    for (; pos < end; pos++) *pos = NopEvent;
  }
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    // We just need to survive till exec.
    CHECK(part);
    atomic_store_relaxed(&thr->trace_pos,
                         reinterpret_cast<uptr>(&part->events[0]));
    return;
  }
#endif
  part = new (MmapOrDie(sizeof(TracePart), "TracePart")) TracePart();
  part->trace = trace;
  thr->trace_prev_pc = 0;
  {
    Lock lock(&trace->mtx);
    trace->parts.PushBack(part);
    atomic_store_relaxed(&thr->trace_pos,
                         reinterpret_cast<uptr>(&part->events[0]));
  }
  // Make this part self-sufficient by restoring the current stack
  // and mutex set at the beginning of the trace.
  TraceTime(thr);
  for (uptr *pos = &thr->shadow_stack[0]; pos < thr->shadow_stack_pos; pos++)
    CHECK(TryTraceFunc(thr, *pos));
  for (uptr i = 0; i < thr->mset.Size(); i++) {
    MutexSet::Desc d = thr->mset.Get(i);
    TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
                   d.addr, d.stack_id);
  }
}

}  // namespace v3

void TraceSwitch(ThreadState *thr) {
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork)
    return;
#endif
  thr->nomalloc++;
  Trace *thr_trace = ThreadTrace(thr->tid);
  Lock l(&thr_trace->mtx);
  unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
  TraceHeader *hdr = &thr_trace->headers[trace];
  hdr->epoch0 = thr->fast_state.epoch();
  ObtainCurrentStack(thr, 0, &hdr->stack0);
  hdr->mset0 = thr->mset;
  thr->nomalloc--;
}

Trace *ThreadTrace(Tid tid) { return (Trace *)GetThreadTraceHeader(tid); }

uptr TraceTopPC(ThreadState *thr) {
  Event *events = (Event *)GetThreadTrace(thr->tid);
  uptr pc = events[thr->fast_state.GetTracePos()];
  return pc;
}

uptr TraceSize() {
  return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
}

uptr TraceParts() {
  return TraceSize() / kTracePartSize;
}

#if !SANITIZER_GO
extern "C" void __tsan_trace_switch() {
  TraceSwitch(cur_thread());
}

extern "C" void __tsan_report_race() {
  ReportRace(cur_thread());
}
#endif

void ThreadIgnoreBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

#if !SANITIZER_GO
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __tsan_testonly_shadow_stack_current_size() {
  ThreadState *thr = cur_thread();
  return thr->shadow_stack_pos - thr->shadow_stack;
}
#endif

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif

}  // namespace __tsan

#if SANITIZER_CHECK_DEADLOCKS
namespace __sanitizer {
using namespace __tsan;
MutexMeta mutex_meta[] = {
    {MutexInvalid, "Invalid", {}},
    {MutexThreadRegistry, "ThreadRegistry", {}},
    {MutexTypeTrace, "Trace", {MutexLeaf}},
    {MutexTypeReport, "Report", {MutexTypeSyncVar}},
    {MutexTypeSyncVar, "SyncVar", {}},
    {MutexTypeAnnotations, "Annotations", {}},
    {MutexTypeAtExit, "AtExit", {MutexTypeSyncVar}},
    {MutexTypeFired, "Fired", {MutexLeaf}},
    {MutexTypeRacy, "Racy", {MutexLeaf}},
    {MutexTypeGlobalProc, "GlobalProc", {}},
    {},
};

void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }
}  // namespace __sanitizer
#endif