//===-- tsan_rtl.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "tsan_rtl.h"

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_interface.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_test_only_on_fork() {}

namespace __tsan {

#if !SANITIZER_GO
void (*on_initialize)(void);
int (*on_finalize)(int);
#endif

#if !SANITIZER_GO && !SANITIZER_MAC
__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(
    SANITIZER_CACHE_LINE_SIZE);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(SANITIZER_CACHE_LINE_SIZE);
Context *ctx;

// Can be overridden by a front-end.
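// If TSAN_EXTERNAL_HOOKS is defined, the front-end provides its own
// OnFinalize/OnInitialize definitions; otherwise the weak defaults below
// dispatch to the optional on_initialize/on_finalize function pointers.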
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
# if !SANITIZER_GO
  if (on_finalize)
    return on_finalize(failed);
# endif
  return failed;
}

SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {
# if !SANITIZER_GO
  if (on_initialize)
    on_initialize();
# endif
}
#endif

static TracePart* TracePartAlloc(ThreadState* thr) {
  TracePart* part = nullptr;
  {
    Lock lock(&ctx->slot_mtx);
    uptr max_parts = Trace::kMinParts + flags()->history_size;
    Trace* trace = &thr->tctx->trace;
    if (trace->parts_allocated == max_parts ||
        ctx->trace_part_finished_excess) {
      part = ctx->trace_part_recycle.PopFront();
      DPrintf("#%d: TracePartAlloc: part=%p\n", thr->tid, part);
      if (part && part->trace) {
        Trace* trace1 = part->trace;
        Lock trace_lock(&trace1->mtx);
        part->trace = nullptr;
        TracePart* part1 = trace1->parts.PopFront();
        CHECK_EQ(part, part1);
        if (trace1->parts_allocated > trace1->parts.Size()) {
          ctx->trace_part_finished_excess +=
              trace1->parts_allocated - trace1->parts.Size();
          trace1->parts_allocated = trace1->parts.Size();
        }
      }
    }
    if (trace->parts_allocated < max_parts) {
      trace->parts_allocated++;
      if (ctx->trace_part_finished_excess)
        ctx->trace_part_finished_excess--;
    }
    if (!part)
      ctx->trace_part_total_allocated++;
    else if (ctx->trace_part_recycle_finished)
      ctx->trace_part_recycle_finished--;
  }
  if (!part)
    part = new (MmapOrDie(sizeof(*part), "TracePart")) TracePart();
  return part;
}

static void TracePartFree(TracePart* part) REQUIRES(ctx->slot_mtx) {
  DCHECK(part->trace);
  part->trace = nullptr;
  ctx->trace_part_recycle.PushFront(part);
}

void TraceResetForTesting() {
  Lock lock(&ctx->slot_mtx);
  while (auto* part = ctx->trace_part_recycle.PopFront()) {
    if (auto trace = part->trace)
      CHECK_EQ(trace->parts.PopFront(), part);
    UnmapOrDie(part, sizeof(*part));
  }
  ctx->trace_part_total_allocated = 0;
  ctx->trace_part_recycle_finished = 0;
  ctx->trace_part_finished_excess = 0;
}

static void DoResetImpl(uptr epoch) {
  ThreadRegistryLock lock0(&ctx->thread_registry);
  Lock lock1(&ctx->slot_mtx);
  CHECK_EQ(ctx->global_epoch, epoch);
  ctx->global_epoch++;
  CHECK(!ctx->resetting);
  ctx->resetting = true;
  for (u32 i = ctx->thread_registry.NumThreadsLocked(); i--;) {
    ThreadContext* tctx = (ThreadContext*)ctx->thread_registry.GetThreadLocked(
        static_cast<Tid>(i));
    // Potentially we could purge all ThreadStatusDead threads from the
    // registry. Since we reset all shadow, they can't race with anything
    // anymore. However, their tids can still be stored in some aux places
    // (e.g. tid of thread that created something).
    auto trace = &tctx->trace;
    Lock lock(&trace->mtx);
    bool attached = tctx->thr && tctx->thr->slot;
    auto parts = &trace->parts;
    bool local = false;
    while (!parts->Empty()) {
      auto part = parts->Front();
      local = local || part == trace->local_head;
      if (local)
        CHECK(!ctx->trace_part_recycle.Queued(part));
      else
        ctx->trace_part_recycle.Remove(part);
      if (attached && parts->Size() == 1) {
        // The thread is running and this is the last/current part.
        // Set the trace position to the end of the current part
        // to force the thread to call TraceSwitchPart and re-attach
        // to a new slot and allocate a new trace part.
        // Note: the thread is concurrently modifying the position as well,
        // so this is only best-effort. The thread can only modify position
        // within this part, because switching parts is protected by
        // slot/trace mutexes that we hold here.
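        // Pointing trace_pos one past the last event makes the next
        // fast-path check in TraceAcquire fail, which diverts the thread
        // into TraceSwitchPart.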
        atomic_store_relaxed(
            &tctx->thr->trace_pos,
            reinterpret_cast<uptr>(&part->events[TracePart::kSize]));
        break;
      }
      parts->Remove(part);
      TracePartFree(part);
    }
    CHECK_LE(parts->Size(), 1);
    trace->local_head = parts->Front();
    if (tctx->thr && !tctx->thr->slot) {
      atomic_store_relaxed(&tctx->thr->trace_pos, 0);
      tctx->thr->trace_prev_pc = 0;
    }
    if (trace->parts_allocated > trace->parts.Size()) {
      ctx->trace_part_finished_excess +=
          trace->parts_allocated - trace->parts.Size();
      trace->parts_allocated = trace->parts.Size();
    }
  }
  while (ctx->slot_queue.PopFront()) {
  }
  for (auto& slot : ctx->slots) {
    slot.SetEpoch(kEpochZero);
    slot.journal.Reset();
    slot.thr = nullptr;
    ctx->slot_queue.PushBack(&slot);
  }

  DPrintf("Resetting shadow...\n");
  if (!MmapFixedSuperNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(),
                               "shadow")) {
    Printf("failed to reset shadow memory\n");
    Die();
  }
  DPrintf("Resetting meta shadow...\n");
  ctx->metamap.ResetClocks();
  ctx->resetting = false;
}

// Clang does not understand locking all slots in the loop:
// error: expecting mutex 'slot.mtx' to be held at start of each loop
void DoReset(ThreadState* thr, uptr epoch) NO_THREAD_SAFETY_ANALYSIS {
  {
    for (auto& slot : ctx->slots) {
      slot.mtx.Lock();
      if (UNLIKELY(epoch == 0))
        epoch = ctx->global_epoch;
      if (UNLIKELY(epoch != ctx->global_epoch)) {
        // Epoch can't change once we've locked the first slot.
        CHECK_EQ(slot.sid, 0);
        slot.mtx.Unlock();
        return;
      }
    }
  }
  DPrintf("#%d: DoReset epoch=%lu\n", thr ? thr->tid : -1, epoch);
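  // All slot mutexes are held at this point, so no thread can attach to a
  // slot or switch trace parts while the reset below runs.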
  DoResetImpl(epoch);
  for (auto& slot : ctx->slots) slot.mtx.Unlock();
}

void FlushShadowMemory() { DoReset(nullptr, 0); }

static TidSlot* FindSlotAndLock(ThreadState* thr)
    ACQUIRE(thr->slot->mtx) NO_THREAD_SAFETY_ANALYSIS {
  CHECK(!thr->slot);
  TidSlot* slot = nullptr;
  for (;;) {
    uptr epoch;
    {
      Lock lock(&ctx->slot_mtx);
      epoch = ctx->global_epoch;
      if (slot) {
        // This is an exhausted slot from the previous iteration.
        if (ctx->slot_queue.Queued(slot))
          ctx->slot_queue.Remove(slot);
        thr->slot_locked = false;
        slot->mtx.Unlock();
      }
      for (;;) {
        slot = ctx->slot_queue.PopFront();
        if (!slot)
          break;
        if (slot->epoch() != kEpochLast) {
          ctx->slot_queue.PushBack(slot);
          break;
        }
      }
    }
    if (!slot) {
      DoReset(thr, epoch);
      continue;
    }
    slot->mtx.Lock();
    CHECK(!thr->slot_locked);
    thr->slot_locked = true;
    if (slot->thr) {
      DPrintf("#%d: preempting sid=%d tid=%d\n", thr->tid, (u32)slot->sid,
              slot->thr->tid);
      slot->SetEpoch(slot->thr->fast_state.epoch());
      slot->thr = nullptr;
    }
    if (slot->epoch() != kEpochLast)
      return slot;
  }
}

void SlotAttachAndLock(ThreadState* thr) {
  TidSlot* slot = FindSlotAndLock(thr);
  DPrintf("#%d: SlotAttach: slot=%u\n", thr->tid, static_cast<int>(slot->sid));
  CHECK(!slot->thr);
  CHECK(!thr->slot);
  slot->thr = thr;
  thr->slot = slot;
  Epoch epoch = EpochInc(slot->epoch());
  CHECK(!EpochOverflow(epoch));
  slot->SetEpoch(epoch);
  thr->fast_state.SetSid(slot->sid);
  thr->fast_state.SetEpoch(epoch);
  if (thr->slot_epoch != ctx->global_epoch) {
    thr->slot_epoch = ctx->global_epoch;
    thr->clock.Reset();
#if !SANITIZER_GO
    thr->last_sleep_stack_id = kInvalidStackID;
    thr->last_sleep_clock.Reset();
#endif
  }
  thr->clock.Set(slot->sid, epoch);
  slot->journal.PushBack({thr->tid, epoch});
}

static void SlotDetachImpl(ThreadState* thr, bool exiting) {
  TidSlot* slot = thr->slot;
  thr->slot = nullptr;
  if (thr != slot->thr) {
    slot = nullptr;  // we don't own the slot anymore
    if (thr->slot_epoch != ctx->global_epoch) {
      TracePart* part = nullptr;
      auto* trace = &thr->tctx->trace;
      {
        Lock l(&trace->mtx);
        auto* parts = &trace->parts;
        // The trace can be completely empty in the unlikely event that
        // the thread was preempted right after it acquired the slot
        // in ThreadStart and has not traced any events yet.
        CHECK_LE(parts->Size(), 1);
        part = parts->PopFront();
        thr->tctx->trace.local_head = nullptr;
        atomic_store_relaxed(&thr->trace_pos, 0);
        thr->trace_prev_pc = 0;
      }
      if (part) {
        Lock l(&ctx->slot_mtx);
        TracePartFree(part);
      }
    }
    return;
  }
  CHECK(exiting || thr->fast_state.epoch() == kEpochLast);
  slot->SetEpoch(thr->fast_state.epoch());
  slot->thr = nullptr;
}

void SlotDetach(ThreadState* thr) {
  Lock lock(&thr->slot->mtx);
  SlotDetachImpl(thr, true);
}

void SlotLock(ThreadState* thr) NO_THREAD_SAFETY_ANALYSIS {
  DCHECK(!thr->slot_locked);
#if SANITIZER_DEBUG
  // Check these mutexes are not locked.
  // We can call DoReset from SlotAttachAndLock, which will lock
  // these mutexes, but it happens only every once in a while.
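  // Briefly acquiring and immediately releasing each mutex asserts (in debug
  // builds) that the current thread does not already hold it.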
  { ThreadRegistryLock lock(&ctx->thread_registry); }
  { Lock lock(&ctx->slot_mtx); }
#endif
  TidSlot* slot = thr->slot;
  slot->mtx.Lock();
  thr->slot_locked = true;
  if (LIKELY(thr == slot->thr && thr->fast_state.epoch() != kEpochLast))
    return;
  SlotDetachImpl(thr, false);
  thr->slot_locked = false;
  slot->mtx.Unlock();
  SlotAttachAndLock(thr);
}

void SlotUnlock(ThreadState* thr) {
  DCHECK(thr->slot_locked);
  thr->slot_locked = false;
  thr->slot->mtx.Unlock();
}

Context::Context()
    : initialized(),
      report_mtx(MutexTypeReport),
      nreported(),
      thread_registry([](Tid tid) -> ThreadContextBase* {
        return new (Alloc(sizeof(ThreadContext))) ThreadContext(tid);
      }),
      racy_mtx(MutexTypeRacy),
      racy_stacks(),
      racy_addresses(),
      fired_suppressions_mtx(MutexTypeFired),
      slot_mtx(MutexTypeSlots),
      resetting() {
  fired_suppressions.reserve(8);
  for (uptr i = 0; i < ARRAY_SIZE(slots); i++) {
    TidSlot* slot = &slots[i];
    slot->sid = static_cast<Sid>(i);
    slot_queue.PushBack(slot);
  }
  global_epoch = 1;
}

TidSlot::TidSlot() : mtx(MutexTypeSlot) {}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Tid tid)
    // Do not touch these, rely on zero initialization,
    // they may be accessed before the ctor.
    // ignore_reads_and_writes()
    // ignore_interceptors()
    : tid(tid) {
  CHECK_EQ(reinterpret_cast<uptr>(this) % SANITIZER_CACHE_LINE_SIZE, 0);
#if !SANITIZER_GO
  // C/C++ uses fixed size shadow stack.
  const int kInitStackSize = kShadowStackSize;
  shadow_stack = static_cast<uptr*>(
      MmapNoReserveOrDie(kInitStackSize * sizeof(uptr), "shadow stack"));
  SetShadowRegionHugePageMode(reinterpret_cast<uptr>(shadow_stack),
                              kInitStackSize * sizeof(uptr));
#else
  // Go uses malloc-allocated shadow stack with dynamic size.
  const int kInitStackSize = 8;
  shadow_stack = static_cast<uptr*>(Alloc(kInitStackSize * sizeof(uptr)));
#endif
  shadow_stack_pos = shadow_stack;
  shadow_stack_end = shadow_stack + kInitStackSize;
}

#if !SANITIZER_GO
void MemoryProfiler(u64 uptime) {
  if (ctx->memprof_fd == kInvalidFd)
    return;
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), uptime);
  WriteToFile(ctx->memprof_fd, buf.data(), internal_strlen(buf.data()));
}

static bool InitializeMemoryProfiler() {
  ctx->memprof_fd = kInvalidFd;
  const char *fname = flags()->profile_memory;
  if (!fname || !fname[0])
    return false;
  if (internal_strcmp(fname, "stdout") == 0) {
    ctx->memprof_fd = 1;
  } else if (internal_strcmp(fname, "stderr") == 0) {
    ctx->memprof_fd = 2;
  } else {
    InternalScopedString filename;
    filename.append("%s.%d", fname, (int)internal_getpid());
    ctx->memprof_fd = OpenFile(filename.data(), WrOnly);
    if (ctx->memprof_fd == kInvalidFd) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
             filename.data());
      return false;
    }
  }
  MemoryProfiler(0);
  return true;
}

static void *BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread_init()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;
  const u64 start = NanoTime();

  u64 last_flush = start;
  uptr last_rss = 0;
  while (!atomic_load_relaxed(&ctx->stop_background_thread)) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
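    // flush_memory_ms is in milliseconds while NanoTime() is in nanoseconds,
    // hence the kMs2Ns conversion below.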
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VReport(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        now = last_flush = NanoTime();
      }
    }
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VReport(1,
              "ThreadSanitizer: memory flush check"
              " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VReport(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        now = NanoTime();
        VReport(1, "ThreadSanitizer: memory flushed RSS=%llu\n",
                (u64)rss >> 20);
      }
      last_rss = rss;
    }

    MemoryProfiler(now - start);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
  return nullptr;
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef __mips__
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
#endif

void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(reinterpret_cast<uptr>(MemToShadow(addr)),
                         reinterpret_cast<uptr>(MemToShadow(addr + size)));
}

#if !SANITIZER_GO
// We call UnmapShadow before the actual munmap, at that point we don't yet
// know if the provided address/size are sane. We can't call UnmapShadow
// after the actual munmap because at that point the memory range can
// already be reused for something else, so we can't rely on the munmap
// return value to understand if the values are sane.
// While calling munmap with insane values (non-canonical address, negative
// size, etc) is an error, the kernel won't crash. We must also try to not
// crash as the failure mode is very confusing (paging fault inside of the
// runtime on some derived shadow address).
static bool IsValidMmapRange(uptr addr, uptr size) {
  if (size == 0)
    return true;
  if (static_cast<sptr>(size) < 0)
    return false;
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return false;
  // Check that if the start of the region belongs to one of app ranges,
  // the end of the region belongs to the same region.
  const uptr ranges[][2] = {
      {LoAppMemBeg(), LoAppMemEnd()},
      {MidAppMemBeg(), MidAppMemEnd()},
      {HiAppMemBeg(), HiAppMemEnd()},
  };
  for (auto range : ranges) {
    if (addr >= range[0] && addr < range[1])
      return addr + size <= range[1];
  }
  return false;
}

void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
  if (size == 0 || !IsValidMmapRange(addr, size))
    return;
  DontNeedShadowFor(addr, size);
  ScopedGlobalProcessor sgp;
  SlotLocker locker(thr, true);
  ctx->metamap.ResetRange(thr->proc(), addr, size, true);
}
#endif

void MapShadow(uptr addr, uptr size) {
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                               "shadow"))
    Die();

  // Meta shadow is 2:1, so tread carefully.
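  // The meta mapping is grown monotonically: the first call covers data+bss,
  // later calls extend mapped_meta_end for heap mappings, so new ranges never
  // re-map an already mapped meta region.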
  static bool data_mapped = false;
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
  } else {
    // Mapping contiguous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    if (meta_end <= mapped_meta_end)
      return;
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (0x%zx-0x%zx) at (0x%zx-0x%zx)\n", addr,
          addr + size, meta_begin, meta_end);
}

#if !SANITIZER_GO
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
#endif

void CheckUnwind() {
  // There is a high probability that interceptors will check-fail as well,
  // on the other hand there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  ThreadState* thr = cur_thread();
  thr->nomalloc = false;
  thr->ignore_sync++;
  thr->ignore_reads_and_writes++;
  atomic_store_relaxed(&thr->in_signal_handler, 0);
#endif
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}

bool is_initialized;

void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckUnwindCallback(CheckUnwind);

  ctx = new(ctx_placeholder) Context;
  const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
  const char *options = GetEnv(env_name);
  CacheBinaryName();
  CheckASLR();
  InitializeFlags(&ctx->flags, options, env_name);
  AvoidCVE_2016_2143();
  __sanitizer::InitializePlatformEarly();
  __tsan::InitializePlatformEarly();

#if !SANITIZER_GO
  // Re-exec ourselves if we need to set additional env or command line args.
  MaybeReexec();

  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  InitializePlatform();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Set up the correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v3 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  Tid tid = ThreadCreate(nullptr, 0, 0, true);
  CHECK_EQ(tid, kMainTid);
  ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
  if (InitializeMemoryProfiler() || flags()->force_background_thread)
    MaybeSpawnBackgroundThread();
#endif
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}

void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we cannot spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}

int Finalize(ThreadState *thr) {
  bool failed = false;

  if (common_flags()->print_module_map == 1)
    DumpProcessMap();

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    internal_usleep(u64(flags()->atexit_sleep_ms) * 1000);

  {
    // Wait for pending reports.
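    // Constructing and destroying the lock waits for any report that is
    // currently being produced on another thread to complete.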
    ScopedErrorReportLock lock;
  }

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();

  failed = OnFinalize(failed);

  return failed ? common_flags()->exitcode : 0;
}

#if !SANITIZER_GO
void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
  GlobalProcessorLock();
  // Detaching from the slot makes OnUserFree skip writing to the shadow.
  // The slot will be locked so any attempts to use it will deadlock anyway.
  SlotDetach(thr);
  for (auto& slot : ctx->slots) slot.mtx.Lock();
  ctx->thread_registry.Lock();
  ctx->slot_mtx.Lock();
  ScopedErrorReportLock::Lock();
  AllocatorLock();
  // Suppress all reports in the pthread_atfork callbacks.
  // Reports will deadlock on the report_mtx.
  // We could ignore sync operations as well,
  // but so far it's unclear if it will do more good or harm.
  // Unnecessarily ignoring things can lead to false positives later.
  thr->suppress_reports++;
  // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
  // we'll assert in CheckNoLocks() unless we ignore interceptors.
  // On OS X libSystem_atfork_prepare/parent/child callbacks are called
  // after/before our callbacks and they call free.
  thr->ignore_interceptors++;
  // Disables memory write in OnUserAlloc/Free.
  thr->ignore_reads_and_writes++;

  __tsan_test_only_on_fork();
}

static void ForkAfter(ThreadState* thr) NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
  thr->ignore_interceptors--;
  thr->ignore_reads_and_writes--;
  AllocatorUnlock();
  ScopedErrorReportLock::Unlock();
  ctx->slot_mtx.Unlock();
  ctx->thread_registry.Unlock();
  for (auto& slot : ctx->slots) slot.mtx.Unlock();
  SlotAttachAndLock(thr);
  SlotUnlock(thr);
  GlobalProcessorUnlock();
}

void ForkParentAfter(ThreadState* thr, uptr pc) { ForkAfter(thr); }

void ForkChildAfter(ThreadState* thr, uptr pc, bool start_thread) {
  ForkAfter(thr);
  u32 nthread = ctx->thread_registry.OnFork(thr->tid);
  VPrintf(1,
          "ThreadSanitizer: forked new process with pid %d,"
          " parent had %d threads\n",
          (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    if (start_thread)
      StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably function
    // after that (some mutexes may be locked before fork). So just enable
    // ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    thr->suppress_reports++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif

#if SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  auto *newstack = (uptr *)Alloc(newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  Free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

StackID CurrentStackId(ThreadState *thr, uptr pc) {
#if !SANITIZER_GO
  if (!thr->is_inited)  // May happen during bootstrap.
    return kInvalidStackID;
#endif
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  StackID id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}

static bool TraceSkipGap(ThreadState* thr) {
  Trace *trace = &thr->tctx->trace;
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
  DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
  auto *part = trace->parts.Back();
  DPrintf("#%d: TraceSwitchPart enter trace=%p parts=%p-%p pos=%p\n", thr->tid,
          trace, trace->parts.Front(), part, pos);
  if (!part)
    return false;
  // We can get here when we still have space in the current trace part.
  // The fast-path check in TraceAcquire has false positives in the middle of
  // the part. Check if we are indeed at the end of the current part or not,
  // and fill any gaps with NopEvent's.
  Event* end = &part->events[TracePart::kSize];
  DCHECK_GE(pos, &part->events[0]);
  DCHECK_LE(pos, end);
  if (pos + 1 < end) {
    if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
        TracePart::kAlignment)
      *pos++ = NopEvent;
    *pos++ = NopEvent;
    DCHECK_LE(pos + 2, end);
    atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
    return true;
  }
  // We are indeed at the end.
  for (; pos < end; pos++) *pos = NopEvent;
  return false;
}

NOINLINE
void TraceSwitchPart(ThreadState* thr) {
  if (TraceSkipGap(thr))
    return;
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    // We just need to survive till exec.
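    // Reports are suppressed after a multi-threaded fork, so it's fine to
    // simply rewind the position to the beginning of the last part and
    // overwrite old events.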
    TracePart* part = thr->tctx->trace.parts.Back();
    if (part) {
      atomic_store_relaxed(&thr->trace_pos,
                           reinterpret_cast<uptr>(&part->events[0]));
      return;
    }
  }
#endif
  TraceSwitchPartImpl(thr);
}

void TraceSwitchPartImpl(ThreadState* thr) {
  SlotLocker locker(thr, true);
  Trace* trace = &thr->tctx->trace;
  TracePart* part = TracePartAlloc(thr);
  part->trace = trace;
  thr->trace_prev_pc = 0;
  TracePart* recycle = nullptr;
  // Keep roughly half of parts local to the thread
  // (not queued into the recycle queue).
  uptr local_parts = (Trace::kMinParts + flags()->history_size + 1) / 2;
  {
    Lock lock(&trace->mtx);
    if (trace->parts.Empty())
      trace->local_head = part;
    if (trace->parts.Size() >= local_parts) {
      recycle = trace->local_head;
      trace->local_head = trace->parts.Next(recycle);
    }
    trace->parts.PushBack(part);
    atomic_store_relaxed(&thr->trace_pos,
                         reinterpret_cast<uptr>(&part->events[0]));
  }
  // Make this part self-sufficient by restoring the current stack
  // and mutex set in the beginning of the trace.
  TraceTime(thr);
  {
    // Pathologically large stacks may not fit into the part.
    // In these cases we log only a fixed number of top frames.
    const uptr kMaxFrames = 1000;
    // Sanity check that kMaxFrames won't consume the whole part.
    static_assert(kMaxFrames < TracePart::kSize / 2, "kMaxFrames is too big");
    uptr* pos = Max(&thr->shadow_stack[0], thr->shadow_stack_pos - kMaxFrames);
    for (; pos < thr->shadow_stack_pos; pos++) {
      if (TryTraceFunc(thr, *pos))
        continue;
      CHECK(TraceSkipGap(thr));
      CHECK(TryTraceFunc(thr, *pos));
    }
  }
  for (uptr i = 0; i < thr->mset.Size(); i++) {
    MutexSet::Desc d = thr->mset.Get(i);
    for (uptr i = 0; i < d.count; i++)
      TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
                     d.addr, d.stack_id);
  }
  {
    Lock lock(&ctx->slot_mtx);
    // There is a small chance that the slot may not be queued at this point.
    // This can happen if the slot has kEpochLast epoch and another thread
    // in FindSlotAndLock discovered that it's exhausted and removed it from
    // the slot queue. kEpochLast can happen in 2 cases: (1) if TraceSwitchPart
    // was called with the slot locked and epoch already at kEpochLast,
    // or (2) if we've acquired a new slot in SlotLock in the beginning
    // of the function and the slot was at kEpochLast - 1, so after increment
    // in SlotAttachAndLock it became kEpochLast.
    if (ctx->slot_queue.Queued(thr->slot)) {
      ctx->slot_queue.Remove(thr->slot);
      ctx->slot_queue.PushBack(thr->slot);
    }
    if (recycle)
      ctx->trace_part_recycle.PushBack(recycle);
  }
  DPrintf("#%d: TraceSwitchPart exit parts=%p-%p pos=0x%zx\n", thr->tid,
          trace->parts.Front(), trace->parts.Back(),
          atomic_load_relaxed(&thr->trace_pos));
}

void ThreadIgnoreBegin(ThreadState* thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

#if !SANITIZER_GO
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __tsan_testonly_shadow_stack_current_size() {
  ThreadState *thr = cur_thread();
  return thr->shadow_stack_pos - thr->shadow_stack;
}
#endif

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

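// The ignore counters nest, so Begin/End calls must be balanced. An
// illustrative (hypothetical) usage pattern:
//   ThreadIgnoreSyncBegin(thr, pc);
//   ... synchronization that should not affect race detection ...
//   ThreadIgnoreSyncEnd(thr);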
void ThreadIgnoreSyncEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif
}  // namespace __tsan

#if SANITIZER_CHECK_DEADLOCKS
namespace __sanitizer {
using namespace __tsan;
MutexMeta mutex_meta[] = {
    {MutexInvalid, "Invalid", {}},
    {MutexThreadRegistry,
     "ThreadRegistry",
     {MutexTypeSlots, MutexTypeTrace, MutexTypeReport}},
    {MutexTypeReport, "Report", {MutexTypeTrace}},
    {MutexTypeSyncVar, "SyncVar", {MutexTypeReport, MutexTypeTrace}},
    {MutexTypeAnnotations, "Annotations", {}},
    {MutexTypeAtExit, "AtExit", {}},
    {MutexTypeFired, "Fired", {MutexLeaf}},
    {MutexTypeRacy, "Racy", {MutexLeaf}},
    {MutexTypeGlobalProc, "GlobalProc", {MutexTypeSlot, MutexTypeSlots}},
    {MutexTypeInternalAlloc, "InternalAlloc", {MutexLeaf}},
    {MutexTypeTrace, "Trace", {}},
    {MutexTypeSlot,
     "Slot",
     {MutexMulti, MutexTypeTrace, MutexTypeSyncVar, MutexThreadRegistry,
      MutexTypeSlots}},
    {MutexTypeSlots, "Slots", {MutexTypeTrace, MutexTypeReport}},
    {},
};

void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }

}  // namespace __sanitizer
#endif