//===-- tsan_rtl_report.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;

static ReportStack *SymbolizeStack(StackTrace trace);

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif
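// For example, a test can link in a strong OnReport definition to intercept
// reports; a minimal sketch (hypothetical override, not part of this file):
//   namespace __tsan {
//   bool OnReport(const ReportDesc *rep, bool suppressed) { return true; }
//   }  // treats every report as suppressed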

SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_on_report(const ReportDesc *rep) {
  (void)rep;
}
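// Similarly, a client that links in a strong __tsan_on_report definition
// observes every finished report, e.g. (hypothetical, details elided):
//   extern "C" void __tsan_on_report(const ReportDesc *rep) { /* inspect */ }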

static void StackStripMain(SymbolizedStack *frames) {
  SymbolizedStack *last_frame = nullptr;
  SymbolizedStack *last_frame2 = nullptr;
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    last_frame2 = last_frame;
    last_frame = cur;
  }

  if (last_frame2 == nullptr)
    return;
#if !SANITIZER_GO
  const char *last = last_frame->info.function;
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'.
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip global ctors init, .preinit_array and the main caller.
  } else if (last && (0 == internal_strcmp(last, "__do_global_ctors_aux") ||
                      0 == internal_strcmp(last, "__libc_csu_init") ||
                      0 == internal_strcmp(last, "__libc_start_main"))) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // If both are null, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack
    // can actually happen if we do not instrument some code,
    // so this is only a debug print. However, we must try hard not to
    // miss it due to our own fault.
    DPrintf("Bottom stack frame is missed\n");
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0,
  // runtime.main).
  last_frame->ClearAll();
  last_frame2->next = nullptr;
#endif
}
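// For illustration: StackStripMain above turns a symbolized stack ending in
// [..., main, __libc_start_main] into one ending at main, and likewise drops
// the bottom frame for the thread-start and ctor-init routines.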

ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return nullptr;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}

static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return nullptr;
  SymbolizedStack *top = nullptr;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
    uptr pc1 = pc;
    // We obtain the return address, but we're interested in the previous
    // instruction.
    if ((pc & kExternalPCBit) == 0)
      pc1 = StackTrace::GetPreviousInstructionPc(pc);
    SymbolizedStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    SymbolizedStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = top;
    top = ent;
  }
  StackStripMain(top);

  auto *stack = New<ReportStack>();
  stack->frames = top;
  return stack;
}

bool ShouldReport(ThreadState *thr, ReportType typ) {
  // We set thr->suppress_reports in the fork context.
  // Taking any locks in the fork context can lead to deadlocks.
  // If any locks are already taken, it's too late to do this check.
  CheckedMutex::CheckNoLocks();
  // For the same reason, check that we didn't lock thread_registry yet.
  if (SANITIZER_DEBUG)
    ThreadRegistryLock l(&ctx->thread_registry);
  if (!flags()->report_bugs || thr->suppress_reports)
    return false;
  switch (typ) {
    case ReportTypeSignalUnsafe:
      return flags()->report_signal_unsafe;
    case ReportTypeThreadLeak:
#if !SANITIZER_GO
      // It's impossible to join phantom threads
      // in the child after fork.
      if (ctx->after_multithreaded_fork)
        return false;
#endif
      return flags()->report_thread_leaks;
    case ReportTypeMutexDestroyLocked:
      return flags()->report_destroy_locked;
    default:
      return true;
  }
}

ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
  ctx->thread_registry.CheckLocked();
  rep_ = New<ReportDesc>();
  rep_->typ = typ;
  rep_->tag = tag;
  ctx->report_mtx.Lock();
}

ScopedReportBase::~ScopedReportBase() {
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}

void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
                                       Tid tid, StackTrace stack,
                                       const MutexSet *mset) {
  uptr addr0, size;
  AccessType typ;
  s.GetAccess(&addr0, &size, &typ);
  auto *mop = New<ReportMop>();
  rep_->mops.PushBack(mop);
  mop->tid = tid;
  mop->addr = addr + addr0;
  mop->size = size;
  mop->write = !(typ & kAccessRead);
  mop->atomic = typ & kAccessAtomic;
  mop->stack = SymbolizeStack(stack);
  mop->external_tag = external_tag;
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    int id = this->AddMutex(d.addr, d.stack_id);
    ReportMopMutex mtx = {id, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReportBase::AddUniqueTid(Tid unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  auto *rt = New<ReportThread>();
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->os_id = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->thread_type = tctx->thread_type;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

#if !SANITIZER_GO
static ThreadContext *FindThreadByTidLocked(Tid tid) {
  ctx->thread_registry.CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry.GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext *>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry.CheckLocked();
  ThreadContext *tctx =
      static_cast<ThreadContext *>(ctx->thread_registry.FindThreadContextLocked(
          IsInStackOrTls, (void *)addr));
  if (!tctx)
    return nullptr;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReportBase::AddThread(Tid tid, bool suppressable) {
#if !SANITIZER_GO
  if (const ThreadContext *tctx = FindThreadByTidLocked(tid))
    AddThread(tctx, suppressable);
#endif
}

int ScopedReportBase::AddMutex(uptr addr, StackID creation_stack_id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->addr == addr)
      return rep_->mutexes[i]->id;
  }
  auto *rm = New<ReportMutex>();
  rep_->mutexes.PushBack(rm);
  rm->id = rep_->mutexes.Size() - 1;
  rm->addr = addr;
  rm->stack = SymbolizeStackId(creation_stack_id);
  return rm->id;
}

void ScopedReportBase::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#if !SANITIZER_GO
  int fd = -1;
  Tid creat_tid = kInvalidTid;
  StackID creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
    auto *loc = New<ReportLocation>();
    loc->type = ReportLocationFD;
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    ThreadContext *tctx = FindThreadByTidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = nullptr;
  uptr block_begin = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void *)addr)) {
    block_begin = (uptr)a->GetBlockBegin((void *)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock(block_begin);
  }
  if (!b)
    b = JavaHeapBlock(addr, &block_begin);
  if (b != nullptr) {
    auto *loc = New<ReportLocation>();
    loc->type = ReportLocationHeap;
    loc->heap_chunk_start = block_begin;
    loc->heap_chunk_size = b->siz;
    loc->external_tag = b->tag;
    loc->tid = b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    if (ThreadContext *tctx = FindThreadByTidLocked(b->tid))
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    auto *loc = New<ReportLocation>();
    loc->type = is_stack ? ReportLocationStack : ReportLocationTLS;
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
#endif
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
}

#if !SANITIZER_GO
void ScopedReportBase::AddSleep(StackID stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReportBase::SetCount(int count) { rep_->count = count; }

const ReportDesc *ScopedReportBase::GetReport() const { return rep_; }

ScopedReport::ScopedReport(ReportType typ, uptr tag)
    : ScopedReportBase(typ, tag) {}

ScopedReport::~ScopedReport() {}

// Replays the trace up to the last_pos position in the last part,
// or up to the provided sid/epoch (whichever comes first),
// and calls the provided function f for each event.
template <typename Func>
void TraceReplay(Trace *trace, TracePart *last, Event *last_pos, Sid sid,
                 Epoch epoch, Func f) {
  TracePart *part = trace->parts.Front();
  Sid ev_sid = kFreeSid;
  Epoch ev_epoch = kEpochOver;
  for (;;) {
    DCHECK_EQ(part->trace, trace);
    // Note: an event can't start in the last element.
    // Since an event can take up to 2 elements,
    // we ensure we have at least 2 before adding an event.
    Event *end = &part->events[TracePart::kSize - 1];
    if (part == last)
      end = last_pos;
    f(kFreeSid, kEpochOver, nullptr);  // notify about part start
    for (Event *evp = &part->events[0]; evp < end; evp++) {
      Event *evp0 = evp;
      if (!evp->is_access && !evp->is_func) {
        switch (evp->type) {
          case EventType::kTime: {
            auto *ev = reinterpret_cast<EventTime *>(evp);
            ev_sid = static_cast<Sid>(ev->sid);
            ev_epoch = static_cast<Epoch>(ev->epoch);
            if (ev_sid == sid && ev_epoch > epoch)
              return;
            break;
          }
          case EventType::kAccessExt:
            FALLTHROUGH;
          case EventType::kAccessRange:
            FALLTHROUGH;
          case EventType::kLock:
            FALLTHROUGH;
          case EventType::kRLock:
            // These take 2 Event elements.
            evp++;
            break;
          case EventType::kUnlock:
            // This takes 1 Event element.
            break;
        }
      }
      CHECK_NE(ev_sid, kFreeSid);
      CHECK_NE(ev_epoch, kEpochOver);
      f(ev_sid, ev_epoch, evp0);
    }
    if (part == last)
      return;
    part = trace->parts.Next(part);
    CHECK(part);
  }
  CHECK(0);
}
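// A minimal sketch of the replay callback protocol (hypothetical usage):
//   TraceReplay(trace, last_part, last_pos, sid, epoch,
//               [&](Sid s, Epoch e, Event *ev) {
//                 if (!ev) return;  // part boundary: reset replay state
//                 // otherwise process one event attributed to s/e
//               });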

static void RestoreStackMatch(VarSizeStackTrace *pstk, MutexSet *pmset,
                              Vector<uptr> *stack, MutexSet *mset, uptr pc,
                              bool *found) {
  DPrintf2("    MATCHED\n");
  *pmset = *mset;
  stack->PushBack(pc);
  pstk->Init(&(*stack)[0], stack->Size());
  stack->PopBack();
  *found = true;
}

// Checks if addr1|size1 is fully contained in addr2|size2.
// We check for fully contained instead of just overlapping
// because a memory access is always traced once, but can be
// split into multiple accesses in the shadow.
static constexpr bool IsWithinAccess(uptr addr1, uptr size1, uptr addr2,
                                     uptr size2) {
  return addr1 >= addr2 && addr1 + size1 <= addr2 + size2;
}
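// For example, a traced 8-byte access at 0x10 covers the 4-byte shadow
// access at 0x14: IsWithinAccess(0x14, 4, 0x10, 8) is true, whereas the
// merely overlapping IsWithinAccess(0x14, 8, 0x10, 8) is false.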

// Replays the trace of slot sid up to the target event identified
// by epoch/addr/size/typ and restores and returns the tid, stack, mutex set
// and tag for that event. If there are multiple such events, it returns
// the last one. Returns false if the event is not present in the trace.
bool RestoreStack(EventType type, Sid sid, Epoch epoch, uptr addr, uptr size,
                  AccessType typ, Tid *ptid, VarSizeStackTrace *pstk,
                  MutexSet *pmset, uptr *ptag) {
  // This function restores the stack trace and mutex set for the thread/epoch.
  // It does so by getting the stack trace and mutex set at the beginning of
  // the trace part, and then replaying the trace up to the given epoch.
  DPrintf2("RestoreStack: sid=%u@%u addr=0x%zx/%zu typ=%x\n",
           static_cast<int>(sid), static_cast<int>(epoch), addr, size,
           static_cast<int>(typ));
  ctx->slot_mtx.CheckLocked();  // needed to prevent trace part recycling
  ctx->thread_registry.CheckLocked();
  TidSlot *slot = &ctx->slots[static_cast<uptr>(sid)];
  Tid tid = kInvalidTid;
  // Need to lock the slot mutex as it protects slot->journal.
  slot->mtx.CheckLocked();
  for (uptr i = 0; i < slot->journal.Size(); i++) {
    DPrintf2("  journal: epoch=%d tid=%d\n",
             static_cast<int>(slot->journal[i].epoch), slot->journal[i].tid);
    if (i == slot->journal.Size() - 1 || slot->journal[i + 1].epoch > epoch) {
      tid = slot->journal[i].tid;
      break;
    }
  }
  if (tid == kInvalidTid)
    return false;
  *ptid = tid;
  ThreadContext *tctx =
      static_cast<ThreadContext *>(ctx->thread_registry.GetThreadLocked(tid));
  Trace *trace = &tctx->trace;
  // Snapshot first/last parts and the current position in the last part.
  TracePart *first_part;
  TracePart *last_part;
  Event *last_pos;
  {
    Lock lock(&trace->mtx);
    first_part = trace->parts.Front();
    if (!first_part) {
      DPrintf2("RestoreStack: tid=%d trace=%p no trace parts\n", tid, trace);
      return false;
    }
    last_part = trace->parts.Back();
    last_pos = trace->final_pos;
    if (tctx->thr)
      last_pos = (Event *)atomic_load_relaxed(&tctx->thr->trace_pos);
  }
  DynamicMutexSet mset;
  Vector<uptr> stack;
  uptr prev_pc = 0;
  bool found = false;
  bool is_read = typ & kAccessRead;
  bool is_atomic = typ & kAccessAtomic;
  bool is_free = typ & kAccessFree;
  DPrintf2("RestoreStack: tid=%d parts=[%p-%p] last_pos=%p\n", tid,
           trace->parts.Front(), last_part, last_pos);
  TraceReplay(
      trace, last_part, last_pos, sid, epoch,
      [&](Sid ev_sid, Epoch ev_epoch, Event *evp) {
        if (evp == nullptr) {
          // Each trace part is self-consistent, so we reset state.
          stack.Resize(0);
          mset->Reset();
          prev_pc = 0;
          return;
        }
        bool match = ev_sid == sid && ev_epoch == epoch;
        if (evp->is_access) {
          if (evp->is_func == 0 && evp->type == EventType::kAccessExt &&
              evp->_ == 0)  // NopEvent
            return;
          auto *ev = reinterpret_cast<EventAccess *>(evp);
          uptr ev_addr = RestoreAddr(ev->addr);
          uptr ev_size = 1 << ev->size_log;
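          // pc_delta is stored biased by 2^(kPCBits-1) so that both small
          // negative and positive PC deltas fit the unsigned bitfield.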
          uptr ev_pc =
              prev_pc + ev->pc_delta - (1 << (EventAccess::kPCBits - 1));
          prev_pc = ev_pc;
          DPrintf2("  Access: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
                   ev_addr, ev_size, ev->is_read, ev->is_atomic);
          if (match && type == EventType::kAccessExt &&
              IsWithinAccess(addr, size, ev_addr, ev_size) &&
              is_read == ev->is_read && is_atomic == ev->is_atomic && !is_free)
            RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
          return;
        }
        if (evp->is_func) {
          auto *ev = reinterpret_cast<EventFunc *>(evp);
          if (ev->pc) {
            DPrintf2(" FuncEnter: pc=0x%llx\n", ev->pc);
            stack.PushBack(ev->pc);
          } else {
            DPrintf2(" FuncExit\n");
            // We don't log pathologically large stacks in each part;
            // if the stack was truncated, we can have more func exits
            // than entries.
            if (stack.Size())
              stack.PopBack();
          }
          return;
        }
        switch (evp->type) {
          case EventType::kAccessExt: {
            auto *ev = reinterpret_cast<EventAccessExt *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_size = 1 << ev->size_log;
            prev_pc = ev->pc;
            DPrintf2("  AccessExt: pc=0x%llx addr=0x%zx/%zu type=%u/%u\n",
                     ev->pc, ev_addr, ev_size, ev->is_read, ev->is_atomic);
            if (match && type == EventType::kAccessExt &&
                IsWithinAccess(addr, size, ev_addr, ev_size) &&
                is_read == ev->is_read && is_atomic == ev->is_atomic &&
                !is_free)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev->pc, &found);
            break;
          }
          case EventType::kAccessRange: {
            auto *ev = reinterpret_cast<EventAccessRange *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_size =
                (ev->size_hi << EventAccessRange::kSizeLoBits) + ev->size_lo;
            uptr ev_pc = RestoreAddr(ev->pc);
            prev_pc = ev_pc;
            DPrintf2("  Range: pc=0x%zx addr=0x%zx/%zu type=%u/%u\n", ev_pc,
                     ev_addr, ev_size, ev->is_read, ev->is_free);
            if (match && type == EventType::kAccessExt &&
                IsWithinAccess(addr, size, ev_addr, ev_size) &&
                is_read == ev->is_read && !is_atomic && is_free == ev->is_free)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
            break;
          }
          case EventType::kLock:
            FALLTHROUGH;
          case EventType::kRLock: {
            auto *ev = reinterpret_cast<EventLock *>(evp);
            bool is_write = ev->type == EventType::kLock;
            uptr ev_addr = RestoreAddr(ev->addr);
            uptr ev_pc = RestoreAddr(ev->pc);
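            // The stack ID is stored split across two bitfields of the
            // event; reassemble the full value.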
            StackID stack_id =
                (ev->stack_hi << EventLock::kStackIDLoBits) + ev->stack_lo;
            DPrintf2("  Lock: pc=0x%zx addr=0x%zx stack=%u write=%d\n", ev_pc,
                     ev_addr, stack_id, is_write);
            mset->AddAddr(ev_addr, stack_id, is_write);
            // Events with ev_pc == 0 are written at the beginning of a trace
            // part as the initial mutex set (they are not real lock events).
            if (match && type == EventType::kLock && addr == ev_addr && ev_pc)
              RestoreStackMatch(pstk, pmset, &stack, mset, ev_pc, &found);
            break;
          }
          case EventType::kUnlock: {
            auto *ev = reinterpret_cast<EventUnlock *>(evp);
            uptr ev_addr = RestoreAddr(ev->addr);
            DPrintf2("  Unlock: addr=0x%zx\n", ev_addr);
            mset->DelAddr(ev_addr);
            break;
          }
          case EventType::kTime:
            // TraceReplay already extracted sid/epoch from it,
            // nothing else to do here.
            break;
        }
      });
  ExtractTagFromStack(pstk, ptag);
  return found;
}

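// The two stacks of a race can be recorded in either order, so the pair of
// hashes is compared both ways.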
bool RacyStacks::operator==(const RacyStacks &other) const {
  if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
    return true;
  if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
    return true;
  return false;
}

static bool FindRacyStacks(const RacyStacks &hash) {
  for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
    if (hash == ctx->racy_stacks[i]) {
      VPrintf(2, "ThreadSanitizer: suppressing report as doubled (stack)\n");
      return true;
    }
  }
  return false;
}

static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2]) {
  if (!flags()->suppress_equal_stacks)
    return false;
  RacyStacks hash;
  hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
  hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
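  // Fast path: probe under the read lock; on a miss, re-check under the
  // exclusive lock before inserting so concurrent reporters don't add
  // duplicates.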
  {
    ReadLock lock(&ctx->racy_mtx);
    if (FindRacyStacks(hash))
      return true;
  }
  Lock lock(&ctx->racy_mtx);
  if (FindRacyStacks(hash))
    return true;
  ctx->racy_stacks.PushBack(hash);
  return false;
}

static bool FindRacyAddress(const RacyAddress &ra0) {
  for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
    RacyAddress ra2 = ctx->racy_addresses[i];
    uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
    uptr minend = min(ra0.addr_max, ra2.addr_max);
    if (maxbeg < minend) {
      VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
      return true;
    }
  }
  return false;
}

static bool HandleRacyAddress(ThreadState *thr, uptr addr_min, uptr addr_max) {
  if (!flags()->suppress_equal_addresses)
    return false;
  RacyAddress ra0 = {addr_min, addr_max};
  {
    ReadLock lock(&ctx->racy_mtx);
    if (FindRacyAddress(ra0))
      return true;
  }
  Lock lock(&ctx->racy_mtx);
  if (FindRacyAddress(ra0))
    return true;
  ctx->racy_addresses.PushBack(ra0);
  return false;
}

bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  // These should have been checked in ShouldReport.
  // It's too late to check them here; we have already taken locks.
  CHECK(flags()->report_bugs);
  CHECK(!thr->suppress_reports);
  atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
  const ReportDesc *rep = srep.GetReport();
  CHECK_EQ(thr->current_report, nullptr);
  thr->current_report = rep;
  Suppression *supp = nullptr;
  uptr pc_or_addr = 0;
  for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (pc_or_addr != 0) {
    Lock lock(&ctx->fired_suppressions_mtx);
    FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    bool suppressed = OnReport(rep, pc_or_addr != 0);
    if (suppressed) {
      thr->current_report = nullptr;
      return false;
    }
  }
  PrintReport(rep);
  __tsan_on_report(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    Die();
  thr->current_report = nullptr;
  return true;
}

bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc_or_addr) {
        if (s->supp)
          atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc_or_addr) {
      if (s->supp)
        atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
                AccessType typ0) {
  CheckedMutex::CheckNoLocks();

  // The symbolizer makes lots of intercepted calls. If we try to process
  // them, at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  uptr addr = ShadowToMem(shadow_mem);
  DPrintf("#%d: ReportRace %p\n", thr->tid, (void *)addr);
  if (!ShouldReport(thr, ReportTypeRace))
    return;
  uptr addr_off0, size0;
  cur.GetAccess(&addr_off0, &size0, nullptr);
  uptr addr_off1, size1, typ1;
  old.GetAccess(&addr_off1, &size1, &typ1);
  if (!flags()->report_atomic_races &&
      ((typ0 & kAccessAtomic) || (typ1 & kAccessAtomic)) &&
      !(typ0 & kAccessFree) && !(typ1 & kAccessFree))
    return;

  const uptr kMop = 2;
  Shadow s[kMop] = {cur, old};
  uptr addr0 = addr + addr_off0;
  uptr addr1 = addr + addr_off1;
  uptr end0 = addr0 + size0;
  uptr end1 = addr1 + size1;
  uptr addr_min = min(addr0, addr1);
  uptr addr_max = max(end0, end1);
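  // For example, an 8-byte access at 0x10 (end 0x18) racing with a 4-byte
  // access at 0x16 (end 0x1a) yields the merged range [0x10, 0x1a).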
  if (IsExpectedReport(addr_min, addr_max - addr_min))
    return;
  if (HandleRacyAddress(thr, addr_min, addr_max))
    return;

  ReportType rep_typ = ReportTypeRace;
  if ((typ0 & kAccessVptr) && (typ1 & kAccessFree))
    rep_typ = ReportTypeVptrUseAfterFree;
  else if (typ0 & kAccessVptr)
    rep_typ = ReportTypeVptrRace;
  else if (typ1 & kAccessFree)
    rep_typ = ReportTypeUseAfterFree;

  if (IsFiredSuppression(ctx, rep_typ, addr))
    return;

  VarSizeStackTrace traces[kMop];
  Tid tids[kMop] = {thr->tid, kInvalidTid};
  uptr tags[kMop] = {kExternalTagNone, kExternalTagNone};

  ObtainCurrentStack(thr, thr->trace_prev_pc, &traces[0], &tags[0]);
  if (IsFiredSuppression(ctx, rep_typ, traces[0]))
    return;

  DynamicMutexSet mset1;
  MutexSet *mset[kMop] = {&thr->mset, mset1};

  // We need to lock the slot during RestoreStack because it protects
  // the slot journal.
  Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
  ThreadRegistryLock l0(&ctx->thread_registry);
  Lock slots_lock(&ctx->slot_mtx);
  if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1,
                    size1, typ1, &tids[1], &traces[1], mset[1], &tags[1]))
    return;

  if (IsFiredSuppression(ctx, rep_typ, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces))
    return;

  // If any of the accesses has a tag, treat this as an "external" race.
  uptr tag = kExternalTagNone;
  for (uptr i = 0; i < kMop; i++) {
    if (tags[i] != kExternalTagNone) {
      rep_typ = ReportTypeExternalRace;
      tag = tags[i];
      break;
    }
  }

  ScopedReport rep(rep_typ, tag);
  for (uptr i = 0; i < kMop; i++)
    rep.AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]);

  for (uptr i = 0; i < kMop; i++) {
    ThreadContext *tctx = static_cast<ThreadContext *>(
        ctx->thread_registry.GetThreadLocked(tids[i]));
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#if !SANITIZER_GO
  if (!((typ0 | typ1) & kAccessFree) &&
      s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid()))
    rep.AddSleep(thr->last_sleep_stack_id);
#endif
  OutputReport(thr, rep);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

// Always inlining PrintCurrentStackSlow, because LocatePcInTrace assumes
// __sanitizer_print_stack_trace exists in the actual unwound stack, but a
// tail call to PrintCurrentStackSlow breaks this assumption because
// __sanitizer_print_stack_trace disappears after the tail call.
// However, this solution is not reliable enough, please see dvyukov's comment
// http://reviews.llvm.org/D19148#406208
// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
ALWAYS_INLINE USED void PrintCurrentStackSlow(uptr pc) {
#if !SANITIZER_GO
  uptr bp = GET_CURRENT_FRAME();
  auto *ptrace = New<BufferedStackTrace>();
  ptrace->Unwind(pc, bp, nullptr, false);

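  // The unwinder records the innermost frame first, while SymbolizeStack
  // expects tsan trace order (innermost last), so reverse the buffer in
  // place.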
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
#endif
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"