xref: /openbsd-src/gnu/llvm/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp (revision 810390e339a5425391477d5d41c78d7cab2424ac)
13cab2bb3Spatrick //===-- tsan_rtl_report.cpp -----------------------------------------------===//
23cab2bb3Spatrick //
33cab2bb3Spatrick // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
43cab2bb3Spatrick // See https://llvm.org/LICENSE.txt for license information.
53cab2bb3Spatrick // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
63cab2bb3Spatrick //
73cab2bb3Spatrick //===----------------------------------------------------------------------===//
83cab2bb3Spatrick //
93cab2bb3Spatrick // This file is a part of ThreadSanitizer (TSan), a race detector.
103cab2bb3Spatrick //
113cab2bb3Spatrick //===----------------------------------------------------------------------===//
123cab2bb3Spatrick 
133cab2bb3Spatrick #include "sanitizer_common/sanitizer_libc.h"
143cab2bb3Spatrick #include "sanitizer_common/sanitizer_placement_new.h"
153cab2bb3Spatrick #include "sanitizer_common/sanitizer_stackdepot.h"
163cab2bb3Spatrick #include "sanitizer_common/sanitizer_common.h"
173cab2bb3Spatrick #include "sanitizer_common/sanitizer_stacktrace.h"
183cab2bb3Spatrick #include "tsan_platform.h"
193cab2bb3Spatrick #include "tsan_rtl.h"
203cab2bb3Spatrick #include "tsan_suppressions.h"
213cab2bb3Spatrick #include "tsan_symbolize.h"
223cab2bb3Spatrick #include "tsan_report.h"
233cab2bb3Spatrick #include "tsan_sync.h"
243cab2bb3Spatrick #include "tsan_mman.h"
253cab2bb3Spatrick #include "tsan_flags.h"
263cab2bb3Spatrick #include "tsan_fd.h"
273cab2bb3Spatrick 
283cab2bb3Spatrick namespace __tsan {
293cab2bb3Spatrick 
303cab2bb3Spatrick using namespace __sanitizer;
313cab2bb3Spatrick 
323cab2bb3Spatrick static ReportStack *SymbolizeStack(StackTrace trace);
333cab2bb3Spatrick 
343cab2bb3Spatrick // Can be overriden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
// Default (weak) implementation: does not inspect the report and leaves the
// suppression decision unchanged. Applications/tests override this to
// intercept reports.
bool OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif
443cab2bb3Spatrick 
SANITIZER_WEAK_DEFAULT_IMPL
// Weak notification hook invoked for every report; the default does nothing.
void __tsan_on_report(const ReportDesc *rep) {
  (void)rep;
}
493cab2bb3Spatrick 
// Removes the bottom-most frame(s) of a symbolized stack that belong to the
// runtime/startup machinery (the frame above main, our thread start thunk,
// libc/ctor init callers), so reports end at user code.
static void StackStripMain(SymbolizedStack *frames) {
  // Walk the list to find the last frame and the one before it.
  SymbolizedStack *last_frame = nullptr;
  SymbolizedStack *last_frame2 = nullptr;
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    last_frame2 = last_frame;
    last_frame = cur;
  }

  // Fewer than two frames: nothing to strip.
  if (last_frame2 == 0)
    return;
#if !SANITIZER_GO
  const char *last = last_frame->info.function;
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
    // Strip global ctors init, .preinit_array and main caller.
  } else if (last && (0 == internal_strcmp(last, "__do_global_ctors_aux") ||
                      0 == internal_strcmp(last, "__libc_csu_init") ||
                      0 == internal_strcmp(last, "__libc_start_main"))) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered stack completely. Trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However we must try hard to not miss it
    // due to our fault.
    DPrintf("Bottom stack frame is missed\n");
  }
#else
  // The last frame always point into runtime (gosched0, goexit0, runtime.main).
  last_frame->ClearAll();
  last_frame2->next = nullptr;
#endif
}
913cab2bb3Spatrick 
SymbolizeStackId(u32 stack_id)923cab2bb3Spatrick ReportStack *SymbolizeStackId(u32 stack_id) {
933cab2bb3Spatrick   if (stack_id == 0)
943cab2bb3Spatrick     return 0;
953cab2bb3Spatrick   StackTrace stack = StackDepotGet(stack_id);
963cab2bb3Spatrick   if (stack.trace == nullptr)
973cab2bb3Spatrick     return nullptr;
983cab2bb3Spatrick   return SymbolizeStack(stack);
993cab2bb3Spatrick }
1003cab2bb3Spatrick 
// Symbolizes every pc of the trace and links the results into a ReportStack.
// Returns null for an empty trace. Frames are prepended one trace entry at a
// time, so the resulting list order mirrors the input trace.
static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return 0;
  SymbolizedStack *top = nullptr;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
    uptr pc1 = pc;
    // We obtain the return address, but we're interested in the previous
    // instruction.
    if ((pc & kExternalPCBit) == 0)
      pc1 = StackTrace::GetPreviousInstructionPc(pc);
    // SymbolizeCode may return a chain of frames (e.g. due to inlining).
    SymbolizedStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    SymbolizedStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    // Splice this entry's chain onto the front of the accumulated list.
    last->next = top;
    top = ent;
  }
  StackStripMain(top);

  auto *stack = New<ReportStack>();
  stack->frames = top;
  return stack;
}
1293cab2bb3Spatrick 
// Decides whether a report of the given type should be produced at all,
// honoring fork-safety constraints and the per-type report_* flags.
bool ShouldReport(ThreadState *thr, ReportType typ) {
  // We set thr->suppress_reports in the fork context.
  // Taking any locking in the fork context can lead to deadlocks.
  // If any locks are already taken, it's too late to do this check.
  CheckedMutex::CheckNoLocks();
  // For the same reason check we didn't lock thread_registry yet.
  // The lock object is scoped to this single statement: it is acquired and
  // released immediately, purely to trip a debug-mode deadlock check.
  if (SANITIZER_DEBUG)
    ThreadRegistryLock l(&ctx->thread_registry);
  if (!flags()->report_bugs || thr->suppress_reports)
    return false;
  switch (typ) {
    case ReportTypeSignalUnsafe:
      return flags()->report_signal_unsafe;
    case ReportTypeThreadLeak:
#if !SANITIZER_GO
      // It's impossible to join phantom threads
      // in the child after fork.
      if (ctx->after_multithreaded_fork)
        return false;
#endif
      return flags()->report_thread_leaks;
    case ReportTypeMutexDestroyLocked:
      return flags()->report_destroy_locked;
    default:
      return true;
  }
}
157d89ec533Spatrick 
// Allocates a fresh report of the given type/tag and takes the global
// report mutex; the mutex is held for the lifetime of the scoped object.
// Requires the thread registry to already be locked by the caller.
ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
  ctx->thread_registry.CheckLocked();
  rep_ = New<ReportDesc>();
  rep_->typ = typ;
  rep_->tag = tag;
  ctx->report_mtx.Lock();
}
1653cab2bb3Spatrick 
// Releases the global report mutex and frees the report storage.
ScopedReportBase::~ScopedReportBase() {
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}
1703cab2bb3Spatrick 
AddStack(StackTrace stack,bool suppressable)1713cab2bb3Spatrick void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
1723cab2bb3Spatrick   ReportStack **rs = rep_->stacks.PushBack();
1733cab2bb3Spatrick   *rs = SymbolizeStack(stack);
1743cab2bb3Spatrick   (*rs)->suppressable = suppressable;
1753cab2bb3Spatrick }
1763cab2bb3Spatrick 
// Appends a memory-operation description to the report: decodes the shadow
// cell into offset/size/access-type, symbolizes the access stack, and
// records the mutexes held at the time of the access.
void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
                                       Tid tid, StackTrace stack,
                                       const MutexSet *mset) {
  uptr addr0, size;
  AccessType typ;
  // The shadow encodes the in-cell offset, access size and type flags.
  s.GetAccess(&addr0, &size, &typ);
  auto *mop = New<ReportMop>();
  rep_->mops.PushBack(mop);
  mop->tid = tid;
  mop->addr = addr + addr0;  // base address plus in-cell offset
  mop->size = size;
  mop->write = !(typ & kAccessRead);
  mop->atomic = typ & kAccessAtomic;
  mop->stack = SymbolizeStack(stack);
  mop->external_tag = external_tag;
  // SymbolizeStack returns null for an empty trace.
  if (mop->stack)
    mop->stack->suppressable = true;
  // Record every mutex held during the access; AddMutex dedups by address.
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    int id = this->AddMutex(d.addr, d.stack_id);
    ReportMopMutex mtx = {id, d.write};
    mop->mset.PushBack(mtx);
  }
}
2013cab2bb3Spatrick 
// Records a unique thread id participating in the report (no deduplication).
void ScopedReportBase::AddUniqueTid(Tid unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}
2053cab2bb3Spatrick 
AddThread(const ThreadContext * tctx,bool suppressable)2063cab2bb3Spatrick void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
2073cab2bb3Spatrick   for (uptr i = 0; i < rep_->threads.Size(); i++) {
2083cab2bb3Spatrick     if ((u32)rep_->threads[i]->id == tctx->tid)
2093cab2bb3Spatrick       return;
2103cab2bb3Spatrick   }
211*810390e3Srobert   auto *rt = New<ReportThread>();
2123cab2bb3Spatrick   rep_->threads.PushBack(rt);
2133cab2bb3Spatrick   rt->id = tctx->tid;
2143cab2bb3Spatrick   rt->os_id = tctx->os_id;
2153cab2bb3Spatrick   rt->running = (tctx->status == ThreadStatusRunning);
2163cab2bb3Spatrick   rt->name = internal_strdup(tctx->name);
2173cab2bb3Spatrick   rt->parent_tid = tctx->parent_tid;
2183cab2bb3Spatrick   rt->thread_type = tctx->thread_type;
2193cab2bb3Spatrick   rt->stack = 0;
2203cab2bb3Spatrick   rt->stack = SymbolizeStackId(tctx->creation_stack_id);
2213cab2bb3Spatrick   if (rt->stack)
2223cab2bb3Spatrick     rt->stack->suppressable = suppressable;
2233cab2bb3Spatrick }
2243cab2bb3Spatrick 
2253cab2bb3Spatrick #if !SANITIZER_GO
// Looks up a thread context by tid. Requires the thread registry to be
// locked by the caller.
static ThreadContext *FindThreadByTidLocked(Tid tid) {
  ctx->thread_registry.CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry.GetThreadLocked(tid));
}
2313cab2bb3Spatrick 
IsInStackOrTls(ThreadContextBase * tctx_base,void * arg)2323cab2bb3Spatrick static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
2333cab2bb3Spatrick   uptr addr = (uptr)arg;
2343cab2bb3Spatrick   ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
2353cab2bb3Spatrick   if (tctx->status != ThreadStatusRunning)
2363cab2bb3Spatrick     return false;
2373cab2bb3Spatrick   ThreadState *thr = tctx->thr;
2383cab2bb3Spatrick   CHECK(thr);
2393cab2bb3Spatrick   return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
2403cab2bb3Spatrick           (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
2413cab2bb3Spatrick }
2423cab2bb3Spatrick 
// Finds the running thread whose stack or TLS contains addr; returns its
// context (or null) and sets *is_stack to distinguish stack from TLS.
// Requires the thread registry to be locked by the caller.
ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry.CheckLocked();
  ThreadContext *tctx =
      static_cast<ThreadContext *>(ctx->thread_registry.FindThreadContextLocked(
          IsInStackOrTls, (void *)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  // Re-test the stack range to classify which of the two regions matched.
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
2553cab2bb3Spatrick #endif
2563cab2bb3Spatrick 
// Convenience overload: looks up the thread context by tid and adds it to
// the report. No-op for Go (no per-thread contexts here) or unknown tids.
void ScopedReportBase::AddThread(Tid tid, bool suppressable) {
#if !SANITIZER_GO
  if (const ThreadContext *tctx = FindThreadByTidLocked(tid))
    AddThread(tctx, suppressable);
#endif
}
2633cab2bb3Spatrick 
AddMutex(uptr addr,StackID creation_stack_id)264*810390e3Srobert int ScopedReportBase::AddMutex(uptr addr, StackID creation_stack_id) {
2653cab2bb3Spatrick   for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
266*810390e3Srobert     if (rep_->mutexes[i]->addr == addr)
267*810390e3Srobert       return rep_->mutexes[i]->id;
2683cab2bb3Spatrick   }
269*810390e3Srobert   auto *rm = New<ReportMutex>();
2703cab2bb3Spatrick   rep_->mutexes.PushBack(rm);
271*810390e3Srobert   rm->id = rep_->mutexes.Size() - 1;
272*810390e3Srobert   rm->addr = addr;
273*810390e3Srobert   rm->stack = SymbolizeStackId(creation_stack_id);
274*810390e3Srobert   return rm->id;
2753cab2bb3Spatrick }
2763cab2bb3Spatrick 
// Classifies the address and appends a location description to the report:
// file descriptor, heap/Java-heap block, thread stack/TLS, or (fallback)
// a symbolized global. The size parameter is currently unused here.
void ScopedReportBase::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#if !SANITIZER_GO
  // 1) File descriptor location.
  int fd = -1;
  Tid creat_tid = kInvalidTid;
  StackID creat_stack = 0;
  bool closed = false;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack, &closed)) {
    auto *loc = New<ReportLocation>();
    loc->type = ReportLocationFD;
    loc->fd_closed = closed;
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    AddThread(creat_tid);
    return;
  }
  // 2) Heap block (either our allocator's or the Java heap).
  MBlock *b = 0;
  uptr block_begin = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void*)addr)) {
    block_begin = (uptr)a->GetBlockBegin((void *)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock(block_begin);
  }
  if (!b)
    b = JavaHeapBlock(addr, &block_begin);
  if (b != 0) {
    auto *loc = New<ReportLocation>();
    loc->type = ReportLocationHeap;
    loc->heap_chunk_start = block_begin;
    loc->heap_chunk_size = b->siz;
    loc->external_tag = b->tag;
    loc->tid = b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    AddThread(b->tid);
    return;
  }
  // 3) A thread's stack or TLS region.
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    auto *loc = New<ReportLocation>();
    loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
#endif
  // 4) Fallback: try to symbolize as a data (global) location.
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
}
3333cab2bb3Spatrick 
3343cab2bb3Spatrick #if !SANITIZER_GO
// Records the (symbolized) stack at which the thread was sleeping, used by
// the sleep-race detection machinery.
void ScopedReportBase::AddSleep(StackID stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
3383cab2bb3Spatrick #endif
3393cab2bb3Spatrick 
// Number of duplicate reports merged into this one.
void ScopedReportBase::SetCount(int count) { rep_->count = count; }

// Signal number for signal-related report types.
void ScopedReportBase::SetSigNum(int sig) { rep_->signum = sig; }

// Read-only access to the accumulated report.
const ReportDesc *ScopedReportBase::GetReport() const { return rep_; }
3453cab2bb3Spatrick 
// Thin concrete wrapper over ScopedReportBase; all behavior lives in the base.
ScopedReport::ScopedReport(ReportType typ, uptr tag)
    : ScopedReportBase(typ, tag) {}

ScopedReport::~ScopedReport() {}
3503cab2bb3Spatrick 
// Replays the trace up to last_pos position in the last part
// or up to the provided epoch/sid (whichever is earlier)
// and calls the provided function f for each event.
// f is invoked as f(sid, epoch, event); a null event signals the start of a
// new trace part (parts are self-consistent, so callers reset their state).
template <typename Func>
void TraceReplay(Trace *trace, TracePart *last, Event *last_pos, Sid sid,
                 Epoch epoch, Func f) {
  TracePart *part = trace->parts.Front();
  // sid/epoch of the events being replayed; set by the first kTime event.
  Sid ev_sid = kFreeSid;
  Epoch ev_epoch = kEpochOver;
  for (;;) {
    DCHECK_EQ(part->trace, trace);
    // Note: an event can't start in the last element.
    // Since an event can take up to 2 elements,
    // we ensure we have at least 2 before adding an event.
    Event *end = &part->events[TracePart::kSize - 1];
    if (part == last)
      end = last_pos;
    f(kFreeSid, kEpochOver, nullptr);  // notify about part start
    for (Event *evp = &part->events[0]; evp < end; evp++) {
      Event *evp0 = evp;
      // Multi-element events: peek at the type to know how far to advance.
      if (!evp->is_access && !evp->is_func) {
        switch (evp->type) {
          case EventType::kTime: {
            // kTime updates the current sid/epoch; stop once we pass the
            // requested epoch for the requested slot.
            auto *ev = reinterpret_cast<EventTime *>(evp);
            ev_sid = static_cast<Sid>(ev->sid);
            ev_epoch = static_cast<Epoch>(ev->epoch);
            if (ev_sid == sid && ev_epoch > epoch)
              return;
            break;
          }
          case EventType::kAccessExt:
            FALLTHROUGH;
          case EventType::kAccessRange:
            FALLTHROUGH;
          case EventType::kLock:
            FALLTHROUGH;
          case EventType::kRLock:
            // These take 2 Event elements.
            evp++;
            break;
          case EventType::kUnlock:
            // This takes 1 Event element.
            break;
        }
      }
      // Every real event must be preceded by a kTime event.
      CHECK_NE(ev_sid, kFreeSid);
      CHECK_NE(ev_epoch, kEpochOver);
      f(ev_sid, ev_epoch, evp0);
    }
    if (part == last)
      return;
    part = trace->parts.Next(part);
    CHECK(part);
  }
  CHECK(0);
}
407*810390e3Srobert 
// Records a matched trace event: snapshots the current mutex set into
// *pmset and the current call stack (with the event pc appended on top)
// into *pstk, then sets *found. The pc is pushed only temporarily so the
// caller's working stack is left unchanged.
static void RestoreStackMatch(VarSizeStackTrace *pstk, MutexSet *pmset,
                              Vector<uptr> *stack, MutexSet *mset, uptr pc,
                              bool *found) {
  DPrintf2("    MATCHED\n");
  *pmset = *mset;
  stack->PushBack(pc);
  pstk->Init(&(*stack)[0], stack->Size());
  stack->PopBack();
  *found = true;
}
418*810390e3Srobert 
419*810390e3Srobert // Checks if addr1|size1 is fully contained in addr2|size2.
420*810390e3Srobert // We check for fully contained instread of just overlapping
421*810390e3Srobert // because a memory access is always traced once, but can be
422*810390e3Srobert // split into multiple accesses in the shadow.
IsWithinAccess(uptr addr1,uptr size1,uptr addr2,uptr size2)423*810390e3Srobert static constexpr bool IsWithinAccess(uptr addr1, uptr size1, uptr addr2,
424*810390e3Srobert                                      uptr size2) {
425*810390e3Srobert   return addr1 >= addr2 && addr1 + size1 <= addr2 + size2;
426*810390e3Srobert }
427*810390e3Srobert 
// Replays the trace of slot sid up to the target event identified
// by epoch/addr/size/typ and restores and returns tid, stack, mutex set
// and tag for that event. If there are multiple such events, it returns
// the last one. Returns false if the event is not present in the trace.
bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
                  AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
                  MutexSet *pmset, uptr *ptag) {
  // This function restores stack trace and mutex set for the thread/epoch.
  // It does so by getting stack trace and mutex set at the beginning of
  // trace part, and then replaying the trace till the given epoch.
  DPrintf2("RestoreStack: sid=%u@%u addr=0x%zx/%zu typ=%x\n",
           static_cast<int>(sid), static_cast<int>(epoch), addr, size,
           static_cast<int>(typ));
  ctx->slot_mtx.CheckLocked();  // needed to prevent trace part recycling
  ctx->thread_registry.CheckLocked();
  TidSlot *slot = &ctx->slots[static_cast<uptr>(sid)];
  Tid tid = kInvalidTid;
  // Need to lock the slot mutex as it protects slot->journal.
  slot->mtx.CheckLocked();
  // Find which thread owned the slot at the requested epoch: pick the
  // journal entry whose successor (if any) starts after the epoch.
  for (uptr i = 0; i < slot->journal.Size(); i++) {
    DPrintf2("  journal: epoch=%d tid=%d\n",
             static_cast<int>(slot->journal[i].epoch), slot->journal[i].tid);
    if (i == slot->journal.Size() - 1 || slot->journal[i + 1].epoch > epoch) {
      tid = slot->journal[i].tid;
      break;
    }
  }
  if (tid == kInvalidTid)
    return false;
  *ptid = tid;
  ThreadContext *tctx =
      static_cast<ThreadContext *>(ctx->thread_registry.GetThreadLocked(tid));
  Trace *trace = &tctx->trace;
  // Snapshot first/last parts and the current position in the last part.
  TracePart *first_part;
  TracePart *last_part;
  Event *last_pos;
  {
    Lock lock(&trace->mtx);
    first_part = trace->parts.Front();
    if (!first_part) {
      DPrintf2("RestoreStack: tid=%d trace=%p no trace parts\n", tid, trace);
      return false;
    }
    last_part = trace->parts.Back();
    last_pos = trace->final_pos;
    // For a live thread the current write position is the real end.
    if (tctx->thr)
      last_pos = (Event *)atomic_load_relaxed(&tctx->thr->trace_pos);
  }
  DynamicMutexSet mset;
  Vector<uptr> stack;
  uptr prev_pc = 0;
  bool found = false;
  bool is_read = typ & kAccessRead;
  bool is_atomic = typ & kAccessAtomic;
  bool is_free = typ & kAccessFree;
  DPrintf2("RestoreStack: tid=%d parts=[%p-%p] last_pos=%p\n", tid,
           trace->parts.Front(), last_part, last_pos);
  TraceReplay(
      trace, last_part, last_pos, sid, epoch,
      [&](Sid ev_sid, Epoch ev_epoch, Event *evp) {
        if (evp == nullptr) {
          // Each trace part is self-consistent, so we reset state.
          stack.Resize(0);
          mset->Reset();
          prev_pc = 0;
          return;
        }
        bool match = ev_sid == sid && ev_epoch == epoch;
        if (evp->is_access) {
          if (evp->is_func == 0 && evp->type == EventType::kAccessExt &&
              evp->_ == 0)  // NopEvent
            return;
          // Compact access event: pc is delta-encoded against the previous pc.
          auto *ev = reinterpret_cast<EventAccess *>(evp);
          uptr ev_addr = RestoreAddr(ev->addr);
          uptr ev_size = 1 << ev->size_log;
          uptr ev_pc =
              prev_pc + ev->pc_delta - (1 << (EventAccess::kPCBits - 1));
          prev_pc = ev_pc;
          DPrintf2("  Access: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
                   ev_addr, ev_size, ev->is_read, ev->is_atomic);
          if (match && type == EventType::kAccessExt &&
              IsWithinAccess(addr, size, ev_addr, ev_size) &&
              is_read == ev->is_read && is_atomic == ev->is_atomic && !is_free)
            RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
          return;
        }
        if (evp->is_func) {
          // Function entry (pc != 0) pushes; exit (pc == 0) pops.
          auto *ev = reinterpret_cast<EventFunc *>(evp);
          if (ev->pc) {
            DPrintf2(" FuncEnter: pc=0x%llx\n", ev->pc);
            stack.PushBack(ev->pc);
          } else {
            DPrintf2(" FuncExit\n");
            // We don't log pathologically large stacks in each part,
            // if the stack was truncated we can have more func exits than
            // entries.
            if (stack.Size())
              stack.PopBack();
          }
          return;
        }
        switch (evp->type) {
          case EventType::kAccessExt: {
            // Extended access event: carries a full (non-delta) pc.
            auto *ev = reinterpret_cast<EventAccessExt *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_size = 1 << ev->size_log;
            prev_pc = ev->pc;
            DPrintf2("  AccessExt: pc=0x%llx addr=0x%zx/%zu type=%u/%u\n",
                     ev->pc, ev_addr, ev_size, ev->is_read, ev->is_atomic);
            if (match && type == EventType::kAccessExt &&
                IsWithinAccess(addr, size, ev_addr, ev_size) &&
                is_read == ev->is_read && is_atomic == ev->is_atomic &&
                !is_free)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev->pc, &found);
            break;
          }
          case EventType::kAccessRange: {
            auto *ev = reinterpret_cast<EventAccessRange *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_size =
                (ev->size_hi << EventAccessRange::kSizeLoBits) + ev->size_lo;
            uptr ev_pc = RestoreAddr(ev->pc);
            prev_pc = ev_pc;
            DPrintf2("  Range: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
                     ev_addr, ev_size, ev->is_read, ev->is_free);
            if (match && type == EventType::kAccessExt &&
                IsWithinAccess(addr, size, ev_addr, ev_size) &&
                is_read == ev->is_read && !is_atomic && is_free == ev->is_free)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
            break;
          }
          case EventType::kLock:
            FALLTHROUGH;
          case EventType::kRLock: {
            auto *ev = reinterpret_cast<EventLock *>(evp);
            bool is_write = ev->type == EventType::kLock;
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_pc = RestoreAddr(ev->pc);
            StackID stack_id =
                (ev->stack_hi << EventLock::kStackIDLoBits) + ev->stack_lo;
            DPrintf2("  Lock: pc=0x%zx addr=0x%zx stack=%u write=%d\n", ev_pc,
                     ev_addr, stack_id, is_write);
            mset->AddAddr(ev_addr, stack_id, is_write);
            // Events with ev_pc == 0 are written to the beginning of trace
            // part as initial mutex set (are not real).
            if (match && type == EventType::kLock && addr == ev_addr && ev_pc)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
            break;
          }
          case EventType::kUnlock: {
            auto *ev = reinterpret_cast<EventUnlock *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            DPrintf2("  Unlock: addr=0x%zx\n", ev_addr);
            mset->DelAddr(ev_addr);
            break;
          }
          case EventType::kTime:
            // TraceReplay already extracted sid/epoch from it,
            // nothing else to do here.
            break;
        }
      });
  ExtractTagFromStack(pstk, ptag);
  return found;
}
594*810390e3Srobert 
operator ==(const RacyStacks & other) const595*810390e3Srobert bool RacyStacks::operator==(const RacyStacks &other) const {
596*810390e3Srobert   if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
597*810390e3Srobert     return true;
598*810390e3Srobert   if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
599*810390e3Srobert     return true;
600*810390e3Srobert   return false;
6013cab2bb3Spatrick }
6023cab2bb3Spatrick 
FindRacyStacks(const RacyStacks & hash)6031f9cb04fSpatrick static bool FindRacyStacks(const RacyStacks &hash) {
6043cab2bb3Spatrick   for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
6053cab2bb3Spatrick     if (hash == ctx->racy_stacks[i]) {
6061f9cb04fSpatrick       VPrintf(2, "ThreadSanitizer: suppressing report as doubled (stack)\n");
6071f9cb04fSpatrick       return true;
6083cab2bb3Spatrick     }
6093cab2bb3Spatrick   }
6101f9cb04fSpatrick   return false;
6113cab2bb3Spatrick }
6121f9cb04fSpatrick 
HandleRacyStacks(ThreadState * thr,VarSizeStackTrace traces[2])6131f9cb04fSpatrick static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
6141f9cb04fSpatrick   if (!flags()->suppress_equal_stacks)
6151f9cb04fSpatrick     return false;
6161f9cb04fSpatrick   RacyStacks hash;
6171f9cb04fSpatrick   hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
6181f9cb04fSpatrick   hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
6191f9cb04fSpatrick   {
6201f9cb04fSpatrick     ReadLock lock(&ctx->racy_mtx);
6211f9cb04fSpatrick     if (FindRacyStacks(hash))
6221f9cb04fSpatrick       return true;
6231f9cb04fSpatrick   }
6241f9cb04fSpatrick   Lock lock(&ctx->racy_mtx);
6251f9cb04fSpatrick   if (FindRacyStacks(hash))
6261f9cb04fSpatrick     return true;
6271f9cb04fSpatrick   ctx->racy_stacks.PushBack(hash);
6281f9cb04fSpatrick   return false;
6291f9cb04fSpatrick }
6301f9cb04fSpatrick 
// Final stage of report production: matches the report against user
// suppressions, lets the embedder veto it via OnReport, then prints it.
// Returns true iff the report was actually printed.
bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  // These should have been checked in ShouldReport.
  // It's too late to check them here, we have already taken locks.
  CHECK(flags()->report_bugs);
  CHECK(!thr->suppress_reports);
  atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
  const ReportDesc *rep = srep.GetReport();
  // Publish the in-flight report on the thread; cleared again on every exit
  // path below.
  CHECK_EQ(thr->current_report, nullptr);
  thr->current_report = rep;
  // Probe each part of the report (memory accesses, other stacks, threads,
  // locations) against suppressions; the first match wins and leaves
  // pc_or_addr != 0.
  Suppression *supp = 0;
  uptr pc_or_addr = 0;
  for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (pc_or_addr != 0) {
    // Remember the fired suppression so that later reports hitting the same
    // pc/address can be rejected early via IsFiredSuppression.
    Lock lock(&ctx->fired_suppressions_mtx);
    FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    // Application/test hook (see OnReport declaration above). Presumably the
    // default implementation just returns the `suppressed` flag — verify
    // against the weak definition elsewhere in the runtime.
    bool suppressed = OnReport(rep, pc_or_addr != 0);
    if (suppressed) {
      thr->current_report = nullptr;
      return false;
    }
  }
  PrintReport(rep);
  // User callback notified after the report is printed.
  __tsan_on_report(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    Die();
  thr->current_report = nullptr;
  return true;
}
6703cab2bb3Spatrick 
IsFiredSuppression(Context * ctx,ReportType type,StackTrace trace)6713cab2bb3Spatrick bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
6723cab2bb3Spatrick   ReadLock lock(&ctx->fired_suppressions_mtx);
6733cab2bb3Spatrick   for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
6743cab2bb3Spatrick     if (ctx->fired_suppressions[k].type != type)
6753cab2bb3Spatrick       continue;
6763cab2bb3Spatrick     for (uptr j = 0; j < trace.size; j++) {
6773cab2bb3Spatrick       FiredSuppression *s = &ctx->fired_suppressions[k];
6783cab2bb3Spatrick       if (trace.trace[j] == s->pc_or_addr) {
6793cab2bb3Spatrick         if (s->supp)
6803cab2bb3Spatrick           atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
6813cab2bb3Spatrick         return true;
6823cab2bb3Spatrick       }
6833cab2bb3Spatrick     }
6843cab2bb3Spatrick   }
6853cab2bb3Spatrick   return false;
6863cab2bb3Spatrick }
6873cab2bb3Spatrick 
IsFiredSuppression(Context * ctx,ReportType type,uptr addr)6883cab2bb3Spatrick static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
6893cab2bb3Spatrick   ReadLock lock(&ctx->fired_suppressions_mtx);
6903cab2bb3Spatrick   for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
6913cab2bb3Spatrick     if (ctx->fired_suppressions[k].type != type)
6923cab2bb3Spatrick       continue;
6933cab2bb3Spatrick     FiredSuppression *s = &ctx->fired_suppressions[k];
6943cab2bb3Spatrick     if (addr == s->pc_or_addr) {
6953cab2bb3Spatrick       if (s->supp)
6963cab2bb3Spatrick         atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
6973cab2bb3Spatrick       return true;
6983cab2bb3Spatrick     }
6993cab2bb3Spatrick   }
7003cab2bb3Spatrick   return false;
7013cab2bb3Spatrick }
7023cab2bb3Spatrick 
SpuriousRace(Shadow old)703*810390e3Srobert static bool SpuriousRace(Shadow old) {
704*810390e3Srobert   Shadow last(LoadShadow(&ctx->last_spurious_race));
705*810390e3Srobert   return last.sid() == old.sid() && last.epoch() == old.epoch();
7063cab2bb3Spatrick }
7073cab2bb3Spatrick 
// Builds and outputs a data-race report for a racing pair of accesses:
// `cur` is the current access (described by `typ0`), `old` is the
// conflicting shadow value found in `shadow_mem`; the old access's
// offset/size/type are recovered from the shadow value itself.
void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
                AccessType typ0) {
  CheckedMutex::CheckNoLocks();

  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  uptr addr = ShadowToMem(shadow_mem);
  DPrintf("#%d: ReportRace %p\n", thr->tid, (void *)addr);
  if (!ShouldReport(thr, ReportTypeRace))
    return;
  uptr addr_off0, size0;
  cur.GetAccess(&addr_off0, &size0, nullptr);
  uptr addr_off1, size1, typ1;
  old.GetAccess(&addr_off1, &size1, &typ1);
  // Atomic-vs-anything races are only reported when report_atomic_races is
  // set, except races with a free(), which are always reported.
  if (!flags()->report_atomic_races &&
      ((typ0 & kAccessAtomic) || (typ1 & kAccessAtomic)) &&
      !(typ0 & kAccessFree) && !(typ1 & kAccessFree))
    return;
  if (SpuriousRace(old))
    return;

  // Index 0 is always the current access, index 1 the old one.
  const uptr kMop = 2;
  Shadow s[kMop] = {cur, old};
  uptr addr0 = addr + addr_off0;
  uptr addr1 = addr + addr_off1;
  uptr end0 = addr0 + size0;
  uptr end1 = addr1 + size1;
  // Bounding byte range covering both accesses.
  uptr addr_min = min(addr0, addr1);
  uptr addr_max = max(end0, end1);
  if (IsExpectedReport(addr_min, addr_max - addr_min))
    return;

  // Refine the report type from the access-type bits of both accesses.
  ReportType rep_typ = ReportTypeRace;
  if ((typ0 & kAccessVptr) && (typ1 & kAccessFree))
    rep_typ = ReportTypeVptrUseAfterFree;
  else if (typ0 & kAccessVptr)
    rep_typ = ReportTypeVptrRace;
  else if (typ1 & kAccessFree)
    rep_typ = ReportTypeUseAfterFree;

  if (IsFiredSuppression(ctx, rep_typ, addr))
    return;

  VarSizeStackTrace traces[kMop];
  Tid tids[kMop] = {thr->tid, kInvalidTid};
  uptr tags[kMop] = {kExternalTagNone, kExternalTagNone};

  // Stack of the current access is taken directly from this thread; the old
  // access's stack must be restored from the trace below.
  ObtainCurrentStack(thr, thr->trace_prev_pc, &traces[0], &tags[0]);
  if (IsFiredSuppression(ctx, rep_typ, traces[0]))
    return;

  DynamicMutexSet mset1;
  MutexSet *mset[kMop] = {&thr->mset, mset1};

  // We need to lock the slot during RestoreStack because it protects
  // the slot journal.
  Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
  ThreadRegistryLock l0(&ctx->thread_registry);
  Lock slots_lock(&ctx->slot_mtx);
  // Re-check after taking the locks: another thread may have marked this old
  // access as spurious in the meantime.
  if (SpuriousRace(old))
    return;
  if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1,
                    size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) {
    // Could not reconstruct the old access (e.g. trace already recycled);
    // remember it so SpuriousRace suppresses repeats of this report.
    StoreShadow(&ctx->last_spurious_race, old.raw());
    return;
  }

  if (IsFiredSuppression(ctx, rep_typ, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces))
    return;

  // If any of the accesses has a tag, treat this as an "external" race.
  uptr tag = kExternalTagNone;
  for (uptr i = 0; i < kMop; i++) {
    if (tags[i] != kExternalTagNone) {
      rep_typ = ReportTypeExternalRace;
      tag = tags[i];
      break;
    }
  }

  ScopedReport rep(rep_typ, tag);
  for (uptr i = 0; i < kMop; i++)
    rep.AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]);

  for (uptr i = 0; i < kMop; i++) {
    ThreadContext *tctx = static_cast<ThreadContext *>(
        ctx->thread_registry.GetThreadLocked(tids[i]));
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

  // Optionally pull in the ancestor threads of every thread already in the
  // report (skipping the main thread and invalid parents).
  if (flags()->print_full_thread_history) {
    const ReportDesc *rep_desc = rep.GetReport();
    for (uptr i = 0; i < rep_desc->threads.Size(); i++) {
      Tid parent_tid = rep_desc->threads[i]->parent_tid;
      if (parent_tid == kMainTid || parent_tid == kInvalidTid)
        continue;
      ThreadContext *parent_tctx = static_cast<ThreadContext *>(
          ctx->thread_registry.GetThreadLocked(parent_tid));
      rep.AddThread(parent_tctx);
    }
  }

#if !SANITIZER_GO
  // Attach the last sleep stack if the old access happened before this
  // thread's last sleep (not applicable to free()-involved races).
  if (!((typ0 | typ1) & kAccessFree) &&
      s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid()))
    rep.AddSleep(thr->last_sleep_stack_id);
#endif
  OutputReport(thr, rep);
}
8243cab2bb3Spatrick 
PrintCurrentStack(ThreadState * thr,uptr pc)8253cab2bb3Spatrick void PrintCurrentStack(ThreadState *thr, uptr pc) {
8263cab2bb3Spatrick   VarSizeStackTrace trace;
8273cab2bb3Spatrick   ObtainCurrentStack(thr, pc, &trace);
8283cab2bb3Spatrick   PrintStack(SymbolizeStack(trace));
8293cab2bb3Spatrick }
8303cab2bb3Spatrick 
8313cab2bb3Spatrick // Always inlining PrintCurrentStackSlow, because LocatePcInTrace assumes
8323cab2bb3Spatrick // __sanitizer_print_stack_trace exists in the actual unwinded stack, but
8333cab2bb3Spatrick // tail-call to PrintCurrentStackSlow breaks this assumption because
8343cab2bb3Spatrick // __sanitizer_print_stack_trace disappears after tail-call.
8353cab2bb3Spatrick // However, this solution is not reliable enough, please see dvyukov's comment
8363cab2bb3Spatrick // http://reviews.llvm.org/D19148#406208
8373cab2bb3Spatrick // Also see PR27280 comment 2 and 3 for breaking examples and analysis.
PrintCurrentStackSlow(uptr pc)838d89ec533Spatrick ALWAYS_INLINE USED void PrintCurrentStackSlow(uptr pc) {
8393cab2bb3Spatrick #if !SANITIZER_GO
8403cab2bb3Spatrick   uptr bp = GET_CURRENT_FRAME();
841*810390e3Srobert   auto *ptrace = New<BufferedStackTrace>();
8423cab2bb3Spatrick   ptrace->Unwind(pc, bp, nullptr, false);
8433cab2bb3Spatrick 
8443cab2bb3Spatrick   for (uptr i = 0; i < ptrace->size / 2; i++) {
8453cab2bb3Spatrick     uptr tmp = ptrace->trace_buffer[i];
8463cab2bb3Spatrick     ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
8473cab2bb3Spatrick     ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
8483cab2bb3Spatrick   }
8493cab2bb3Spatrick   PrintStack(SymbolizeStack(*ptrace));
8503cab2bb3Spatrick #endif
8513cab2bb3Spatrick }
8523cab2bb3Spatrick 
8533cab2bb3Spatrick }  // namespace __tsan
8543cab2bb3Spatrick 
8553cab2bb3Spatrick using namespace __tsan;
8563cab2bb3Spatrick 
8573cab2bb3Spatrick extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
// Public entry point: prints the current stack trace of the calling thread.
// Must not tail-call PrintCurrentStackSlow (see the comment above it).
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
8623cab2bb3Spatrick }  // extern "C"
863