//===-- tsan_rtl_report.cc ------------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "tsan_report.h"
#include "tsan_sync.h"
#include "tsan_mman.h"
#include "tsan_flags.h"
#include "tsan_fd.h"

namespace __tsan {

using namespace __sanitizer;  // NOLINT

static ReportStack *SymbolizeStack(StackTrace trace);

void TsanCheckFailed(const char *file, int line, const char *cond,
                     u64 v1, u64 v2) {
  // There is a high probability that interceptors will check-fail as well;
  // on the other hand, there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  cur_thread()->ignore_sync++;
  cur_thread()->ignore_reads_and_writes++;
#endif
  Printf("FATAL: ThreadSanitizer CHECK failed: "
         "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
         file, line, cond, (uptr)v1, (uptr)v2);
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
  Die();
}

// Can be overridden by an application/test to intercept reports.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnReport(const ReportDesc *rep, bool suppressed);
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnReport(const ReportDesc *rep, bool suppressed) {
  (void)rep;
  return suppressed;
}
#endif

SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_on_report(const ReportDesc *rep) {
  (void)rep;
}

// Strips the bottom frames that belong to the runtime (the frame above 'main',
// the internal thread start routine, or global ctors init) so that reports
// end at user code.
static void StackStripMain(SymbolizedStack *frames) {
  SymbolizedStack *last_frame = nullptr;
  SymbolizedStack *last_frame2 = nullptr;
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    last_frame2 = last_frame;
    last_frame = cur;
  }

  if (last_frame2 == 0)
    return;
#if !SANITIZER_GO
  const char *last = last_frame->info.function;
  const char *last2 = last_frame2->info.function;
  // Strip frame above 'main'
  if (last2 && 0 == internal_strcmp(last2, "main")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip our internal thread start routine.
  } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // Strip global ctors init.
  } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
    last_frame->ClearAll();
    last_frame2->next = nullptr;
  // If both are 0, then we probably just failed to symbolize.
  } else if (last || last2) {
    // Ensure that we recovered the stack completely. A trimmed stack
    // can actually happen if we do not instrument some code,
    // so it's only a debug print. However, we must try hard not to miss it
    // due to our own fault.
    DPrintf("Bottom stack frame is missed\n");
  }
#else
  // The last frame always points into the runtime (gosched0, goexit0, runtime.main).
  last_frame->ClearAll();
  last_frame2->next = nullptr;
#endif
}

ReportStack *SymbolizeStackId(u32 stack_id) {
  if (stack_id == 0)
    return 0;
  StackTrace stack = StackDepotGet(stack_id);
  if (stack.trace == nullptr)
    return nullptr;
  return SymbolizeStack(stack);
}

static ReportStack *SymbolizeStack(StackTrace trace) {
  if (trace.size == 0)
    return 0;
  SymbolizedStack *top = nullptr;
  for (uptr si = 0; si < trace.size; si++) {
    const uptr pc = trace.trace[si];
    uptr pc1 = pc;
    // We obtain the return address, but we're interested in the previous
    // instruction.
    if ((pc & kExternalPCBit) == 0)
      pc1 = StackTrace::GetPreviousInstructionPc(pc);
    SymbolizedStack *ent = SymbolizeCode(pc1);
    CHECK_NE(ent, 0);
    SymbolizedStack *last = ent;
    while (last->next) {
      last->info.address = pc;  // restore original pc for report
      last = last->next;
    }
    last->info.address = pc;  // restore original pc for report
    last->next = top;
    top = ent;
  }
  StackStripMain(top);

  ReportStack *stack = ReportStack::New();
  stack->frames = top;
  return stack;
}

ScopedReport::ScopedReport(ReportType typ, uptr tag) {
  ctx->thread_registry->CheckLocked();
  void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
  rep_ = new(mem) ReportDesc;
  rep_->typ = typ;
  rep_->tag = tag;
  ctx->report_mtx.Lock();
  CommonSanitizerReportMutex.Lock();
}

ScopedReport::~ScopedReport() {
  CommonSanitizerReportMutex.Unlock();
  ctx->report_mtx.Unlock();
  DestroyAndFree(rep_);
}

void ScopedReport::AddStack(StackTrace stack, bool suppressable) {
  ReportStack **rs = rep_->stacks.PushBack();
  *rs = SymbolizeStack(stack);
  (*rs)->suppressable = suppressable;
}

void ScopedReport::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
                                   StackTrace stack, const MutexSet *mset) {
  void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
  ReportMop *mop = new(mem) ReportMop;
  rep_->mops.PushBack(mop);
  mop->tid = s.tid();
  mop->addr = addr + s.addr0();
  mop->size = s.size();
  mop->write = s.IsWrite();
  mop->atomic = s.IsAtomic();
  mop->stack = SymbolizeStack(stack);
  mop->external_tag = external_tag;
  if (mop->stack)
    mop->stack->suppressable = true;
  for (uptr i = 0; i < mset->Size(); i++) {
    MutexSet::Desc d = mset->Get(i);
    u64 mid = this->AddMutex(d.id);
    ReportMopMutex mtx = {mid, d.write};
    mop->mset.PushBack(mtx);
  }
}

void ScopedReport::AddUniqueTid(int unique_tid) {
  rep_->unique_tids.PushBack(unique_tid);
}

void ScopedReport::AddThread(const ThreadContext *tctx, bool suppressable) {
  for (uptr i = 0; i < rep_->threads.Size(); i++) {
    if ((u32)rep_->threads[i]->id == tctx->tid)
      return;
  }
  void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
  ReportThread *rt = new(mem) ReportThread;
  rep_->threads.PushBack(rt);
  rt->id = tctx->tid;
  rt->os_id = tctx->os_id;
  rt->running = (tctx->status == ThreadStatusRunning);
  rt->name = internal_strdup(tctx->name);
  rt->parent_tid = tctx->parent_tid;
  rt->workerthread = tctx->workerthread;
  rt->stack = 0;
  rt->stack = SymbolizeStackId(tctx->creation_stack_id);
  if (rt->stack)
    rt->stack->suppressable = suppressable;
}

#if !SANITIZER_GO
static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
  int unique_id = *(int *)arg;
  return tctx->unique_id == (u32)unique_id;
}

static ThreadContext *FindThreadByUidLocked(int unique_id) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext *>(
      ctx->thread_registry->FindThreadContextLocked(
          FindThreadByUidLockedCallback, &unique_id));
}

static ThreadContext *FindThreadByTidLocked(int tid) {
  ctx->thread_registry->CheckLocked();
  return static_cast<ThreadContext*>(
      ctx->thread_registry->GetThreadLocked(tid));
}

static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
  uptr addr = (uptr)arg;
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  if (tctx->status != ThreadStatusRunning)
    return false;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
          (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
}

ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
  ctx->thread_registry->CheckLocked();
  ThreadContext *tctx = static_cast<ThreadContext*>(
      ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
                                                    (void*)addr));
  if (!tctx)
    return 0;
  ThreadState *thr = tctx->thr;
  CHECK(thr);
  *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
  return tctx;
}
#endif

void ScopedReport::AddThread(int unique_tid, bool suppressable) {
#if !SANITIZER_GO
  if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
    AddThread(tctx, suppressable);
#endif
}

void ScopedReport::AddMutex(const SyncVar *s) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == s->uid)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = s->uid;
  rm->addr = s->addr;
  rm->destroyed = false;
  rm->stack = SymbolizeStackId(s->creation_stack_id);
}

u64 ScopedReport::AddMutex(u64 id) {
  u64 uid = 0;
  u64 mid = id;
  uptr addr = SyncVar::SplitId(id, &uid);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  // Check that the mutex is still alive.
  // Another mutex can be created at the same address,
  // so check uid as well.
  if (s && s->CheckId(uid)) {
    mid = s->uid;
    AddMutex(s);
  } else {
    AddDeadMutex(id);
  }
  if (s)
    s->mtx.Unlock();
  return mid;
}

void ScopedReport::AddDeadMutex(u64 id) {
  for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
    if (rep_->mutexes[i]->id == id)
      return;
  }
  void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
  ReportMutex *rm = new(mem) ReportMutex;
  rep_->mutexes.PushBack(rm);
  rm->id = id;
  rm->addr = 0;
  rm->destroyed = true;
  rm->stack = 0;
}

void ScopedReport::AddLocation(uptr addr, uptr size) {
  if (addr == 0)
    return;
#if !SANITIZER_GO
  int fd = -1;
  int creat_tid = kInvalidTid;
  u32 creat_stack = 0;
  if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
    ReportLocation *loc = ReportLocation::New(ReportLocationFD);
    loc->fd = fd;
    loc->tid = creat_tid;
    loc->stack = SymbolizeStackId(creat_stack);
    rep_->locs.PushBack(loc);
    ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
    if (tctx)
      AddThread(tctx);
    return;
  }
  MBlock *b = 0;
  Allocator *a = allocator();
  if (a->PointerIsMine((void*)addr)) {
    void *block_begin = a->GetBlockBegin((void*)addr);
    if (block_begin)
      b = ctx->metamap.GetBlock((uptr)block_begin);
  }
  if (b != 0) {
    ThreadContext *tctx = FindThreadByTidLocked(b->tid);
    ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
    loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
    loc->heap_chunk_size = b->siz;
    loc->external_tag = b->tag;
    loc->tid = tctx ? tctx->tid : b->tid;
    loc->stack = SymbolizeStackId(b->stk);
    rep_->locs.PushBack(loc);
    if (tctx)
      AddThread(tctx);
    return;
  }
  bool is_stack = false;
  if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
    ReportLocation *loc =
        ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
    loc->tid = tctx->tid;
    rep_->locs.PushBack(loc);
    AddThread(tctx);
  }
#endif
  if (ReportLocation *loc = SymbolizeData(addr)) {
    loc->suppressable = true;
    rep_->locs.PushBack(loc);
    return;
  }
}

#if !SANITIZER_GO
void ScopedReport::AddSleep(u32 stack_id) {
  rep_->sleep = SymbolizeStackId(stack_id);
}
#endif

void ScopedReport::SetCount(int count) {
  rep_->count = count;
}

const ReportDesc *ScopedReport::GetReport() const {
  return rep_;
}

void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
                  MutexSet *mset, uptr *tag) {
  // This function restores the stack trace and mutex set for the thread/epoch.
  // It does so by taking the stack trace and mutex set at the beginning of
  // the trace part, and then replaying the trace up to the given epoch.
  Trace* trace = ThreadTrace(tid);
  ReadLock l(&trace->mtx);
  const int partidx = (epoch / kTracePartSize) % TraceParts();
  TraceHeader* hdr = &trace->headers[partidx];
  if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
    return;
  CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
  const u64 epoch0 = RoundDown(epoch, TraceSize());
  const u64 eend = epoch % TraceSize();
  const u64 ebegin = RoundDown(eend, kTracePartSize);
  DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
          tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
  Vector<uptr> stack(MBlockReportStack);
  stack.Resize(hdr->stack0.size + 64);
  for (uptr i = 0; i < hdr->stack0.size; i++) {
    stack[i] = hdr->stack0.trace[i];
    DPrintf2("  #%02zu: pc=%zx\n", i, stack[i]);
  }
  if (mset)
    *mset = hdr->mset0;
  uptr pos = hdr->stack0.size;
  Event *events = (Event*)GetThreadTrace(tid);
  for (uptr i = ebegin; i <= eend; i++) {
    Event ev = events[i];
    EventType typ = (EventType)(ev >> kEventPCBits);
    uptr pc = (uptr)(ev & ((1ull << kEventPCBits) - 1));
    DPrintf2("  %zu typ=%d pc=%zx\n", i, typ, pc);
    if (typ == EventTypeMop) {
      stack[pos] = pc;
    } else if (typ == EventTypeFuncEnter) {
      if (stack.Size() < pos + 2)
        stack.Resize(pos + 2);
      stack[pos++] = pc;
    } else if (typ == EventTypeFuncExit) {
      if (pos > 0)
        pos--;
    }
    if (mset) {
      if (typ == EventTypeLock) {
        mset->Add(pc, true, epoch0 + i);
      } else if (typ == EventTypeUnlock) {
        mset->Del(pc, true);
      } else if (typ == EventTypeRLock) {
        mset->Add(pc, false, epoch0 + i);
      } else if (typ == EventTypeRUnlock) {
        mset->Del(pc, false);
      }
    }
    for (uptr j = 0; j <= pos; j++)
      DPrintf2("      #%zu: %zx\n", j, stack[j]);
  }
  if (pos == 0 && stack[0] == 0)
    return;
  pos++;
  stk->Init(&stack[0], pos);
  ExtractTagFromStack(stk, tag);
}

// Returns true if this report duplicates an already reported race (same pair
// of stack hashes or an overlapping address range) and should be suppressed.
static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                             uptr addr_min, uptr addr_max) {
  bool equal_stack = false;
  RacyStacks hash;
  bool equal_address = false;
  RacyAddress ra0 = {addr_min, addr_max};
  {
    ReadLock lock(&ctx->racy_mtx);
    if (flags()->suppress_equal_stacks) {
      hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
      hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
      for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
        if (hash == ctx->racy_stacks[i]) {
          VPrintf(2,
                  "ThreadSanitizer: suppressing report as doubled (stack)\n");
          equal_stack = true;
          break;
        }
      }
    }
    if (flags()->suppress_equal_addresses) {
      for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
        RacyAddress ra2 = ctx->racy_addresses[i];
        uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
        uptr minend = min(ra0.addr_max, ra2.addr_max);
        if (maxbeg < minend) {
          VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
          equal_address = true;
          break;
        }
      }
    }
  }
  if (!equal_stack && !equal_address)
    return false;
  if (!equal_stack) {
    Lock lock(&ctx->racy_mtx);
    ctx->racy_stacks.PushBack(hash);
  }
  if (!equal_address) {
    Lock lock(&ctx->racy_mtx);
    ctx->racy_addresses.PushBack(ra0);
  }
  return true;
}

// Remembers the stacks and address range of a reported race so that later
// duplicates can be suppressed.
static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
                          uptr addr_min, uptr addr_max) {
  Lock lock(&ctx->racy_mtx);
  if (flags()->suppress_equal_stacks) {
    RacyStacks hash;
    hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
    hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
    ctx->racy_stacks.PushBack(hash);
  }
  if (flags()->suppress_equal_addresses) {
    RacyAddress ra0 = {addr_min, addr_max};
    ctx->racy_addresses.PushBack(ra0);
  }
}

bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
  if (!flags()->report_bugs || thr->suppress_reports)
    return false;
  atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
  const ReportDesc *rep = srep.GetReport();
  CHECK_EQ(thr->current_report, nullptr);
  thr->current_report = rep;
  Suppression *supp = 0;
  uptr pc_or_addr = 0;
  for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
  for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
    pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
  if (pc_or_addr != 0) {
    Lock lock(&ctx->fired_suppressions_mtx);
    FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
    ctx->fired_suppressions.push_back(s);
  }
  {
    bool old_is_freeing = thr->is_freeing;
    thr->is_freeing = false;
    bool suppressed = OnReport(rep, pc_or_addr != 0);
    thr->is_freeing = old_is_freeing;
    if (suppressed) {
      thr->current_report = nullptr;
      return false;
    }
  }
  PrintReport(rep);
  __tsan_on_report(rep);
  ctx->nreported++;
  if (flags()->halt_on_error)
    Die();
  thr->current_report = nullptr;
  return true;
}

bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    for (uptr j = 0; j < trace.size; j++) {
      FiredSuppression *s = &ctx->fired_suppressions[k];
      if (trace.trace[j] == s->pc_or_addr) {
        if (s->supp)
          atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
        return true;
      }
    }
  }
  return false;
}

static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
  ReadLock lock(&ctx->fired_suppressions_mtx);
  for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
    if (ctx->fired_suppressions[k].type != type)
      continue;
    FiredSuppression *s = &ctx->fired_suppressions[k];
    if (addr == s->pc_or_addr) {
      if (s->supp)
        atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

// Returns true if the race should be reported even with report_atomic_races=0:
// either neither access is atomic, or an atomic access races with a free.
static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
  Shadow s0(thr->racy_state[0]);
  Shadow s1(thr->racy_state[1]);
  CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
  if (!s0.IsAtomic() && !s1.IsAtomic())
    return true;
  if (s0.IsAtomic() && s1.IsFreed())
    return true;
  if (s1.IsAtomic() && thr->is_freeing)
    return true;
  return false;
}

void ReportRace(ThreadState *thr) {
  CheckNoLocks(thr);

  // Symbolizer makes lots of intercepted calls. If we try to process them,
  // at best it will cause deadlocks on internal mutexes.
  ScopedIgnoreInterceptors ignore;

  if (!flags()->report_bugs)
    return;
  if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
    return;

  bool freed = false;
  {
    Shadow s(thr->racy_state[1]);
    freed = s.GetFreedAndReset();
    thr->racy_state[1] = s.raw();
  }

  uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
  uptr addr_min = 0;
  uptr addr_max = 0;
  {
    uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
    uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
    uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
    uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
    addr_min = min(a0, a1);
    addr_max = max(e0, e1);
    if (IsExpectedReport(addr_min, addr_max - addr_min))
      return;
  }

  ReportType typ = ReportTypeRace;
  if (thr->is_vptr_access && freed)
    typ = ReportTypeVptrUseAfterFree;
  else if (thr->is_vptr_access)
    typ = ReportTypeVptrRace;
  else if (freed)
    typ = ReportTypeUseAfterFree;

  if (IsFiredSuppression(ctx, typ, addr))
    return;

  const uptr kMop = 2;
  VarSizeStackTrace traces[kMop];
  uptr tags[kMop] = {kExternalTagNone};
  uptr toppc = TraceTopPC(thr);
  if (toppc >> kEventPCBits) {
    // This is a work-around for a known issue.
    // The scenario where this happens is rather elaborate and requires
    // an instrumented __sanitizer_report_error_summary callback and
    // a __tsan_symbolize_external callback and a race during a range memory
    // access larger than 8 bytes. MemoryAccessRange adds the current PC to
    // the trace and starts processing memory accesses. A first memory access
    // triggers a race, we report it and call the instrumented
    // __sanitizer_report_error_summary, which adds more stuff to the trace
    // since it is instrumented. Then a second memory access in
    // MemoryAccessRange also triggers a race and we get here and call
    // TraceTopPC to get the current PC, however now it contains some
    // unrelated events from the callback. Most likely, TraceTopPC will now
    // return an EventTypeFuncExit event. Later we subtract 1 from it
    // (in GetPreviousInstructionPc) and the resulting PC has kExternalPCBit
    // set, so we pass it to __tsan_symbolize_external.
    // __tsan_symbolize_external is within its rights to crash since the PC
    // is completely bogus.
    // test/tsan/double_race.cc contains a test case for this.
    toppc = 0;
  }
  ObtainCurrentStack(thr, toppc, &traces[0], &tags[0]);
  if (IsFiredSuppression(ctx, typ, traces[0]))
    return;

  // MutexSet is too large to live on stack.
  Vector<u64> mset_buffer(MBlockScopedBuf);
  mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1);
  MutexSet *mset2 = new(&mset_buffer[0]) MutexSet();

  Shadow s2(thr->racy_state[1]);
  RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2, &tags[1]);
  if (IsFiredSuppression(ctx, typ, traces[1]))
    return;

  if (HandleRacyStacks(thr, traces, addr_min, addr_max))
    return;

  // If any of the accesses has a tag, treat this as an "external" race.
  uptr tag = kExternalTagNone;
  for (uptr i = 0; i < kMop; i++) {
    if (tags[i] != kExternalTagNone) {
      typ = ReportTypeExternalRace;
      tag = tags[i];
      break;
    }
  }

  ThreadRegistryLock l0(ctx->thread_registry);
  ScopedReport rep(typ, tag);
  for (uptr i = 0; i < kMop; i++) {
    Shadow s(thr->racy_state[i]);
    rep.AddMemoryAccess(addr, tags[i], s, traces[i],
                        i == 0 ? &thr->mset : mset2);
  }

  for (uptr i = 0; i < kMop; i++) {
    FastState s(thr->racy_state[i]);
    ThreadContext *tctx = static_cast<ThreadContext*>(
        ctx->thread_registry->GetThreadLocked(s.tid()));
    if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
      continue;
    rep.AddThread(tctx);
  }

  rep.AddLocation(addr_min, addr_max - addr_min);

#if !SANITIZER_GO
  {  // NOLINT
    Shadow s(thr->racy_state[1]);
    if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
      rep.AddSleep(thr->last_sleep_stack_id);
  }
#endif

  if (!OutputReport(thr, rep))
    return;

  AddRacyStacks(thr, traces, addr_min, addr_max);
}

void PrintCurrentStack(ThreadState *thr, uptr pc) {
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  PrintStack(SymbolizeStack(trace));
}

// Always inline PrintCurrentStackSlow, because LocatePcInTrace assumes that
// __sanitizer_print_stack_trace exists in the actually unwound stack, but a
// tail call to PrintCurrentStackSlow breaks this assumption because
// __sanitizer_print_stack_trace disappears after the tail call.
// However, this solution is not reliable enough, please see dvyukov's comment
// http://reviews.llvm.org/D19148#406208
// Also see PR27280 comment 2 and 3 for breaking examples and analysis.
ALWAYS_INLINE
void PrintCurrentStackSlow(uptr pc) {
#if !SANITIZER_GO
  BufferedStackTrace *ptrace =
      new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
          BufferedStackTrace();
  ptrace->Unwind(kStackTraceMax, pc, 0, 0, 0, 0, false);
  // Unwind() stores the innermost frame first; reverse the buffer in place so
  // that SymbolizeStack sees the frames in the order it expects.
  for (uptr i = 0; i < ptrace->size / 2; i++) {
    uptr tmp = ptrace->trace_buffer[i];
    ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
    ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
  }
  PrintStack(SymbolizeStack(*ptrace));
#endif
}

}  // namespace __tsan

using namespace __tsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_print_stack_trace() {
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}
}  // extern "C"
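For reference, a minimal usage sketch (not part of the runtime source): any program linked against the TSan runtime can call the __sanitizer_print_stack_trace entry point defined above to print the current symbolized stack, for example from a debugging helper. The helper name below is hypothetical.

// Illustrative user-code helper; DebugDumpCurrentStack is a hypothetical name.
extern "C" void __sanitizer_print_stack_trace();

static void DebugDumpCurrentStack() {
  // Dumps the caller's stack through the TSan symbolizer (see above).
  __sanitizer_print_stack_trace();
}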