//===-- tsan_rtl.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Main file (entry points) for the TSan run-time.
//===----------------------------------------------------------------------===//

#include "tsan_rtl.h"

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "tsan_defs.h"
#include "tsan_interface.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_suppressions.h"
#include "tsan_symbolize.h"
#include "ubsan/ubsan_init.h"

volatile int __tsan_resumed = 0;

extern "C" void __tsan_resume() {
  __tsan_resumed = 1;
}

SANITIZER_WEAK_DEFAULT_IMPL
void __tsan_test_only_on_fork() {}

namespace __tsan {

#if !SANITIZER_GO
void (*on_initialize)(void);
int (*on_finalize)(int);
#endif

#if !SANITIZER_GO && !SANITIZER_APPLE
__attribute__((tls_model("initial-exec")))
THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(
    SANITIZER_CACHE_LINE_SIZE);
#endif
static char ctx_placeholder[sizeof(Context)] ALIGNED(SANITIZER_CACHE_LINE_SIZE);
Context *ctx;

// Can be overridden by a front-end.
#ifdef TSAN_EXTERNAL_HOOKS
bool OnFinalize(bool failed);
void OnInitialize();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL
bool OnFinalize(bool failed) {
#  if !SANITIZER_GO
  if (on_finalize)
    return on_finalize(failed);
#  endif
  return failed;
}

SANITIZER_WEAK_CXX_DEFAULT_IMPL
void OnInitialize() {
#  if !SANITIZER_GO
  if (on_initialize)
    on_initialize();
#  endif
}
#endif

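// Allocates a trace part for the calling thread. When the thread has already
// allocated its maximum number of parts (Trace::kMinParts +
// flags()->history_size), or other threads have finished parts in excess of
// their limits, a part is taken from the global recycle list if possible;
// otherwise a fresh part is mmaped.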
static TracePart* TracePartAlloc(ThreadState* thr) {
  TracePart* part = nullptr;
  {
    Lock lock(&ctx->slot_mtx);
    uptr max_parts = Trace::kMinParts + flags()->history_size;
    Trace* trace = &thr->tctx->trace;
    if (trace->parts_allocated == max_parts ||
        ctx->trace_part_finished_excess) {
      part = ctx->trace_part_recycle.PopFront();
      DPrintf("#%d: TracePartAlloc: part=%p\n", thr->tid, part);
      if (part && part->trace) {
        Trace* trace1 = part->trace;
        Lock trace_lock(&trace1->mtx);
        part->trace = nullptr;
        TracePart* part1 = trace1->parts.PopFront();
        CHECK_EQ(part, part1);
        if (trace1->parts_allocated > trace1->parts.Size()) {
          ctx->trace_part_finished_excess +=
              trace1->parts_allocated - trace1->parts.Size();
          trace1->parts_allocated = trace1->parts.Size();
        }
      }
    }
    if (trace->parts_allocated < max_parts) {
      trace->parts_allocated++;
      if (ctx->trace_part_finished_excess)
        ctx->trace_part_finished_excess--;
    }
    if (!part)
      ctx->trace_part_total_allocated++;
    else if (ctx->trace_part_recycle_finished)
      ctx->trace_part_recycle_finished--;
  }
  if (!part)
    part = new (MmapOrDie(sizeof(*part), "TracePart")) TracePart();
  return part;
}

static void TracePartFree(TracePart* part) SANITIZER_REQUIRES(ctx->slot_mtx) {
  DCHECK(part->trace);
  part->trace = nullptr;
  ctx->trace_part_recycle.PushFront(part);
}

void TraceResetForTesting() {
  Lock lock(&ctx->slot_mtx);
  while (auto* part = ctx->trace_part_recycle.PopFront()) {
    if (auto trace = part->trace)
      CHECK_EQ(trace->parts.PopFront(), part);
    UnmapOrDie(part, sizeof(*part));
  }
  ctx->trace_part_total_allocated = 0;
  ctx->trace_part_recycle_finished = 0;
  ctx->trace_part_finished_excess = 0;
}

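// Resets the global state: increments the global epoch, releases all trace
// parts except the ones currently in use by attached threads, re-initializes
// all TidSlots and re-maps the shadow region. Expects all slot mutexes to be
// held by the caller (see DoReset below).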
static void DoResetImpl(uptr epoch) {
  ThreadRegistryLock lock0(&ctx->thread_registry);
  Lock lock1(&ctx->slot_mtx);
  CHECK_EQ(ctx->global_epoch, epoch);
  ctx->global_epoch++;
  CHECK(!ctx->resetting);
  ctx->resetting = true;
  for (u32 i = ctx->thread_registry.NumThreadsLocked(); i--;) {
    ThreadContext* tctx = (ThreadContext*)ctx->thread_registry.GetThreadLocked(
        static_cast<Tid>(i));
    // Potentially we could purge all ThreadStatusDead threads from the
    // registry. Since we reset all shadow, they can't race with anything
    // anymore. However, their tid's can still be stored in some aux places
    // (e.g. tid of thread that created something).
    auto trace = &tctx->trace;
    Lock lock(&trace->mtx);
    bool attached = tctx->thr && tctx->thr->slot;
    auto parts = &trace->parts;
    bool local = false;
    while (!parts->Empty()) {
      auto part = parts->Front();
      local = local || part == trace->local_head;
      if (local)
        CHECK(!ctx->trace_part_recycle.Queued(part));
      else
        ctx->trace_part_recycle.Remove(part);
      if (attached && parts->Size() == 1) {
        // The thread is running and this is the last/current part.
        // Set the trace position to the end of the current part
        // to force the thread to call SwitchTracePart and re-attach
        // to a new slot and allocate a new trace part.
        // Note: the thread is concurrently modifying the position as well,
        // so this is only best-effort. The thread can only modify position
        // within this part, because switching parts is protected by
        // slot/trace mutexes that we hold here.
        atomic_store_relaxed(
            &tctx->thr->trace_pos,
            reinterpret_cast<uptr>(&part->events[TracePart::kSize]));
        break;
      }
      parts->Remove(part);
      TracePartFree(part);
    }
    CHECK_LE(parts->Size(), 1);
    trace->local_head = parts->Front();
    if (tctx->thr && !tctx->thr->slot) {
      atomic_store_relaxed(&tctx->thr->trace_pos, 0);
      tctx->thr->trace_prev_pc = 0;
    }
    if (trace->parts_allocated > trace->parts.Size()) {
      ctx->trace_part_finished_excess +=
          trace->parts_allocated - trace->parts.Size();
      trace->parts_allocated = trace->parts.Size();
    }
  }
  while (ctx->slot_queue.PopFront()) {
  }
  for (auto& slot : ctx->slots) {
    slot.SetEpoch(kEpochZero);
    slot.journal.Reset();
    slot.thr = nullptr;
    ctx->slot_queue.PushBack(&slot);
  }

  DPrintf("Resetting shadow...\n");
  auto shadow_begin = ShadowBeg();
  auto shadow_end = ShadowEnd();
#if SANITIZER_GO
  CHECK_NE(0, ctx->mapped_shadow_begin);
  shadow_begin = ctx->mapped_shadow_begin;
  shadow_end = ctx->mapped_shadow_end;
  VPrintf(2, "shadow_begin-shadow_end: (0x%zx-0x%zx)\n",
          shadow_begin, shadow_end);
#endif

#if SANITIZER_WINDOWS
  auto resetFailed =
      !ZeroMmapFixedRegion(shadow_begin, shadow_end - shadow_begin);
#else
  auto resetFailed =
      !MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow");
#  if !SANITIZER_GO
  DontDumpShadow(shadow_begin, shadow_end - shadow_begin);
#  endif
#endif
  if (resetFailed) {
    Printf("failed to reset shadow memory\n");
    Die();
  }
  DPrintf("Resetting meta shadow...\n");
  ctx->metamap.ResetClocks();
  StoreShadow(&ctx->last_spurious_race, Shadow::kEmpty);
  ctx->resetting = false;
}

// Clang does not understand locking all slots in the loop:
// error: expecting mutex 'slot.mtx' to be held at start of each loop
void DoReset(ThreadState* thr, uptr epoch) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  for (auto& slot : ctx->slots) {
    slot.mtx.Lock();
    if (UNLIKELY(epoch == 0))
      epoch = ctx->global_epoch;
    if (UNLIKELY(epoch != ctx->global_epoch)) {
      // Epoch can't change once we've locked the first slot.
      CHECK_EQ(slot.sid, 0);
      slot.mtx.Unlock();
      return;
    }
  }
  DPrintf("#%d: DoReset epoch=%lu\n", thr ? thr->tid : -1, epoch);
  DoResetImpl(epoch);
  for (auto& slot : ctx->slots) slot.mtx.Unlock();
}

void FlushShadowMemory() { DoReset(nullptr, 0); }

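// Picks a free TidSlot for the thread and returns it locked.
// Slots whose epoch reached kEpochLast are skipped; if no usable slot is
// found, a global reset is performed and the search restarts. A slot still
// referencing a preempted thread is taken over after saving that thread's
// epoch into the slot.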
static TidSlot* FindSlotAndLock(ThreadState* thr)
    SANITIZER_ACQUIRE(thr->slot->mtx) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  CHECK(!thr->slot);
  TidSlot* slot = nullptr;
  for (;;) {
    uptr epoch;
    {
      Lock lock(&ctx->slot_mtx);
      epoch = ctx->global_epoch;
      if (slot) {
        // This is an exhausted slot from the previous iteration.
        if (ctx->slot_queue.Queued(slot))
          ctx->slot_queue.Remove(slot);
        thr->slot_locked = false;
        slot->mtx.Unlock();
      }
      for (;;) {
        slot = ctx->slot_queue.PopFront();
        if (!slot)
          break;
        if (slot->epoch() != kEpochLast) {
          ctx->slot_queue.PushBack(slot);
          break;
        }
      }
    }
    if (!slot) {
      DoReset(thr, epoch);
      continue;
    }
    slot->mtx.Lock();
    CHECK(!thr->slot_locked);
    thr->slot_locked = true;
    if (slot->thr) {
      DPrintf("#%d: preempting sid=%d tid=%d\n", thr->tid, (u32)slot->sid,
              slot->thr->tid);
      slot->SetEpoch(slot->thr->fast_state.epoch());
      slot->thr = nullptr;
    }
    if (slot->epoch() != kEpochLast)
      return slot;
  }
}

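// Attaches the thread to a free slot (leaving the slot mutex locked),
// advances the slot epoch and mirrors it into the thread's fast_state and
// vector clock. If a global reset happened since the thread last owned a
// slot, the thread's clock is reset as well.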
void SlotAttachAndLock(ThreadState* thr) {
  TidSlot* slot = FindSlotAndLock(thr);
  DPrintf("#%d: SlotAttach: slot=%u\n", thr->tid, static_cast<int>(slot->sid));
  CHECK(!slot->thr);
  CHECK(!thr->slot);
  slot->thr = thr;
  thr->slot = slot;
  Epoch epoch = EpochInc(slot->epoch());
  CHECK(!EpochOverflow(epoch));
  slot->SetEpoch(epoch);
  thr->fast_state.SetSid(slot->sid);
  thr->fast_state.SetEpoch(epoch);
  if (thr->slot_epoch != ctx->global_epoch) {
    thr->slot_epoch = ctx->global_epoch;
    thr->clock.Reset();
#if !SANITIZER_GO
    thr->last_sleep_stack_id = kInvalidStackID;
    thr->last_sleep_clock.Reset();
#endif
  }
  thr->clock.Set(slot->sid, epoch);
  slot->journal.PushBack({thr->tid, epoch});
}

static void SlotDetachImpl(ThreadState* thr, bool exiting) {
  TidSlot* slot = thr->slot;
  thr->slot = nullptr;
  if (thr != slot->thr) {
    slot = nullptr;  // we don't own the slot anymore
    if (thr->slot_epoch != ctx->global_epoch) {
      TracePart* part = nullptr;
      auto* trace = &thr->tctx->trace;
      {
        Lock l(&trace->mtx);
        auto* parts = &trace->parts;
        // The trace can be completely empty in an unlikely event
        // the thread is preempted right after it acquired the slot
        // in ThreadStart and did not trace any events yet.
        CHECK_LE(parts->Size(), 1);
        part = parts->PopFront();
        thr->tctx->trace.local_head = nullptr;
        atomic_store_relaxed(&thr->trace_pos, 0);
        thr->trace_prev_pc = 0;
      }
      if (part) {
        Lock l(&ctx->slot_mtx);
        TracePartFree(part);
      }
    }
    return;
  }
  CHECK(exiting || thr->fast_state.epoch() == kEpochLast);
  slot->SetEpoch(thr->fast_state.epoch());
  slot->thr = nullptr;
}

void SlotDetach(ThreadState* thr) {
  Lock lock(&thr->slot->mtx);
  SlotDetachImpl(thr, true);
}

void SlotLock(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  DCHECK(!thr->slot_locked);
#if SANITIZER_DEBUG
  // Check these mutexes are not locked.
  // We can call DoReset from SlotAttachAndLock, which will lock
  // these mutexes, but it happens only every once in a while.
  { ThreadRegistryLock lock(&ctx->thread_registry); }
  { Lock lock(&ctx->slot_mtx); }
#endif
  TidSlot* slot = thr->slot;
  slot->mtx.Lock();
  thr->slot_locked = true;
  if (LIKELY(thr == slot->thr && thr->fast_state.epoch() != kEpochLast))
    return;
  SlotDetachImpl(thr, false);
  thr->slot_locked = false;
  slot->mtx.Unlock();
  SlotAttachAndLock(thr);
}

void SlotUnlock(ThreadState* thr) {
  DCHECK(thr->slot_locked);
  thr->slot_locked = false;
  thr->slot->mtx.Unlock();
}

Context::Context()
    : initialized(),
      report_mtx(MutexTypeReport),
      nreported(),
      thread_registry([](Tid tid) -> ThreadContextBase* {
        return new (Alloc(sizeof(ThreadContext))) ThreadContext(tid);
      }),
      racy_mtx(MutexTypeRacy),
      racy_stacks(),
      fired_suppressions_mtx(MutexTypeFired),
      slot_mtx(MutexTypeSlots),
      resetting() {
  fired_suppressions.reserve(8);
  for (uptr i = 0; i < ARRAY_SIZE(slots); i++) {
    TidSlot* slot = &slots[i];
    slot->sid = static_cast<Sid>(i);
    slot_queue.PushBack(slot);
  }
  global_epoch = 1;
}

TidSlot::TidSlot() : mtx(MutexTypeSlot) {}

// The objects are allocated in TLS, so one may rely on zero-initialization.
ThreadState::ThreadState(Tid tid)
    // Do not touch these, rely on zero initialization,
    // they may be accessed before the ctor.
    // ignore_reads_and_writes()
    // ignore_interceptors()
    : tid(tid) {
  CHECK_EQ(reinterpret_cast<uptr>(this) % SANITIZER_CACHE_LINE_SIZE, 0);
#if !SANITIZER_GO
  // C/C++ uses fixed size shadow stack.
  const int kInitStackSize = kShadowStackSize;
  shadow_stack = static_cast<uptr*>(
      MmapNoReserveOrDie(kInitStackSize * sizeof(uptr), "shadow stack"));
  SetShadowRegionHugePageMode(reinterpret_cast<uptr>(shadow_stack),
                              kInitStackSize * sizeof(uptr));
#else
  // Go uses malloc-allocated shadow stack with dynamic size.
  const int kInitStackSize = 8;
  shadow_stack = static_cast<uptr*>(Alloc(kInitStackSize * sizeof(uptr)));
#endif
  shadow_stack_pos = shadow_stack;
  shadow_stack_end = shadow_stack + kInitStackSize;
}

#if !SANITIZER_GO
void MemoryProfiler(u64 uptime) {
  if (ctx->memprof_fd == kInvalidFd)
    return;
  InternalMmapVector<char> buf(4096);
  WriteMemoryProfile(buf.data(), buf.size(), uptime);
  WriteToFile(ctx->memprof_fd, buf.data(), internal_strlen(buf.data()));
}

static bool InitializeMemoryProfiler() {
  ctx->memprof_fd = kInvalidFd;
  const char *fname = flags()->profile_memory;
  if (!fname || !fname[0])
    return false;
  if (internal_strcmp(fname, "stdout") == 0) {
    ctx->memprof_fd = 1;
  } else if (internal_strcmp(fname, "stderr") == 0) {
    ctx->memprof_fd = 2;
  } else {
    InternalScopedString filename;
    filename.append("%s.%d", fname, (int)internal_getpid());
    ctx->memprof_fd = OpenFile(filename.data(), WrOnly);
    if (ctx->memprof_fd == kInvalidFd) {
      Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
             filename.data());
      return false;
    }
  }
  MemoryProfiler(0);
  return true;
}

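// Background thread: periodically flushes shadow memory and the symbolizer
// cache, enforces the memory_limit_mb limit and emits memory profiles,
// depending on the corresponding flags.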
static void *BackgroundThread(void *arg) {
  // This is a non-initialized non-user thread, nothing to see here.
  // We don't use ScopedIgnoreInterceptors, because we want ignores to be
  // enabled even when the thread function exits (e.g. during pthread thread
  // shutdown code).
  cur_thread_init()->ignore_interceptors++;
  const u64 kMs2Ns = 1000 * 1000;
  const u64 start = NanoTime();

  u64 last_flush = start;
  uptr last_rss = 0;
  while (!atomic_load_relaxed(&ctx->stop_background_thread)) {
    SleepForMillis(100);
    u64 now = NanoTime();

    // Flush memory if requested.
    if (flags()->flush_memory_ms > 0) {
      if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
        VReport(1, "ThreadSanitizer: periodic memory flush\n");
        FlushShadowMemory();
        now = last_flush = NanoTime();
      }
    }
    if (flags()->memory_limit_mb > 0) {
      uptr rss = GetRSS();
      uptr limit = uptr(flags()->memory_limit_mb) << 20;
      VReport(1,
              "ThreadSanitizer: memory flush check"
              " RSS=%llu LAST=%llu LIMIT=%llu\n",
              (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
      if (2 * rss > limit + last_rss) {
        VReport(1, "ThreadSanitizer: flushing memory due to RSS\n");
        FlushShadowMemory();
        rss = GetRSS();
        now = NanoTime();
        VReport(1, "ThreadSanitizer: memory flushed RSS=%llu\n",
                (u64)rss >> 20);
      }
      last_rss = rss;
    }

    MemoryProfiler(now - start);

    // Flush symbolizer cache if requested.
    if (flags()->flush_symbolizer_ms > 0) {
      u64 last = atomic_load(&ctx->last_symbolize_time_ns,
                             memory_order_relaxed);
      if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
        Lock l(&ctx->report_mtx);
        ScopedErrorReportLock l2;
        SymbolizeFlush();
        atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
      }
    }
  }
  return nullptr;
}

static void StartBackgroundThread() {
  ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
}

#ifndef __mips__
static void StopBackgroundThread() {
  atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
  internal_join_thread(ctx->background_thread);
  ctx->background_thread = 0;
}
#endif
#endif

void DontNeedShadowFor(uptr addr, uptr size) {
  ReleaseMemoryPagesToOS(reinterpret_cast<uptr>(MemToShadow(addr)),
                         reinterpret_cast<uptr>(MemToShadow(addr + size)));
}

#if !SANITIZER_GO
// We call UnmapShadow before the actual munmap, at that point we don't yet
// know if the provided address/size are sane. We can't call UnmapShadow
// after the actual munmap because at that point the memory range can
// already be reused for something else, so we can't rely on the munmap
// return value to understand if the values are sane.
// While calling munmap with insane values (non-canonical address, negative
// size, etc) is an error, the kernel won't crash. We must also try to not
// crash as the failure mode is very confusing (paging fault inside of the
// runtime on some derived shadow address).
static bool IsValidMmapRange(uptr addr, uptr size) {
  if (size == 0)
    return true;
  if (static_cast<sptr>(size) < 0)
    return false;
  if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
    return false;
  // Check that if the start of the region belongs to one of the app ranges,
  // the end of the region belongs to the same range.
  const uptr ranges[][2] = {
      {LoAppMemBeg(), LoAppMemEnd()},
      {MidAppMemBeg(), MidAppMemEnd()},
      {HiAppMemBeg(), HiAppMemEnd()},
  };
  for (auto range : ranges) {
    if (addr >= range[0] && addr < range[1])
      return addr + size <= range[1];
  }
  return false;
}

void UnmapShadow(ThreadState *thr, uptr addr, uptr size) {
  if (size == 0 || !IsValidMmapRange(addr, size))
    return;
  DontNeedShadowFor(addr, size);
  ScopedGlobalProcessor sgp;
  SlotLocker locker(thr, true);
  ctx->metamap.ResetRange(thr->proc(), addr, size, true);
}
#endif

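// Maps shadow and meta shadow for the application range [addr, addr+size).
// The first call covers data+bss, subsequent calls extend the mapping for
// the heap.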
void MapShadow(uptr addr, uptr size) {
  // Ensure the thread registry lock is held, so as to synchronize
  // with DoReset, which also accesses the mapped_shadow_* ctx fields.
  ThreadRegistryLock lock0(&ctx->thread_registry);
  static bool data_mapped = false;

#if !SANITIZER_GO
  // Global data is not 64K aligned, but there are no adjacent mappings,
  // so we can get away with unaligned mapping.
  // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
  const uptr kPageSize = GetPageSizeCached();
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
  if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
    Die();
#else
  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), (64 << 10));
  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), (64 << 10));
  VPrintf(2, "MapShadow for (0x%zx-0x%zx), begin/end: (0x%zx-0x%zx)\n",
          addr, addr + size, shadow_begin, shadow_end);

  if (!data_mapped) {
    // First call maps data+bss.
    if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                                 "shadow"))
      Die();
  } else {
    VPrintf(2, "ctx->mapped_shadow_{begin,end} = (0x%zx-0x%zx)\n",
            ctx->mapped_shadow_begin, ctx->mapped_shadow_end);
    // Second and subsequent calls map heap.
    if (shadow_end <= ctx->mapped_shadow_end)
      return;
    if (!ctx->mapped_shadow_begin || ctx->mapped_shadow_begin > shadow_begin)
      ctx->mapped_shadow_begin = shadow_begin;
    if (shadow_begin < ctx->mapped_shadow_end)
      shadow_begin = ctx->mapped_shadow_end;
    VPrintf(2, "MapShadow begin/end = (0x%zx-0x%zx)\n",
            shadow_begin, shadow_end);
    if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
                                 "shadow"))
      Die();
    ctx->mapped_shadow_end = shadow_end;
  }
#endif

  // Meta shadow is 2:1, so tread carefully.
  static uptr mapped_meta_end = 0;
  uptr meta_begin = (uptr)MemToMeta(addr);
  uptr meta_end = (uptr)MemToMeta(addr + size);
  meta_begin = RoundDownTo(meta_begin, 64 << 10);
  meta_end = RoundUpTo(meta_end, 64 << 10);
  if (!data_mapped) {
    // First call maps data+bss.
    data_mapped = true;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
  } else {
    // Mapping continuous heap.
    // Windows wants 64K alignment.
    meta_begin = RoundDownTo(meta_begin, 64 << 10);
    meta_end = RoundUpTo(meta_end, 64 << 10);
    CHECK_GT(meta_end, mapped_meta_end);
    if (meta_begin < mapped_meta_end)
      meta_begin = mapped_meta_end;
    if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
                                 "meta shadow"))
      Die();
    mapped_meta_end = meta_end;
  }
  VPrintf(2, "mapped meta shadow for (0x%zx-0x%zx) at (0x%zx-0x%zx)\n", addr,
          addr + size, meta_begin, meta_end);
}

#if !SANITIZER_GO
static void OnStackUnwind(const SignalContext &sig, const void *,
                          BufferedStackTrace *stack) {
  stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
                common_flags()->fast_unwind_on_fatal);
}

static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
  HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
}
#endif

void CheckUnwind() {
  // There is high probability that interceptors will check-fail as well,
  // on the other hand there is no sense in processing interceptors
  // since we are going to die soon.
  ScopedIgnoreInterceptors ignore;
#if !SANITIZER_GO
  ThreadState* thr = cur_thread();
  thr->nomalloc = false;
  thr->ignore_sync++;
  thr->ignore_reads_and_writes++;
  atomic_store_relaxed(&thr->in_signal_handler, 0);
#endif
  PrintCurrentStackSlow(StackTrace::GetCurrentPc());
}

bool is_initialized;

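// One-time runtime initialization: parses flags, sets up the allocator,
// interceptors, platform hooks and shadow memory, creates the main thread
// and optionally spawns the background thread.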
void Initialize(ThreadState *thr) {
  // Thread safe because done before all threads exist.
  if (is_initialized)
    return;
  is_initialized = true;
  // We are not ready to handle interceptors yet.
  ScopedIgnoreInterceptors ignore;
  SanitizerToolName = "ThreadSanitizer";
  // Install tool-specific callbacks in sanitizer_common.
  SetCheckUnwindCallback(CheckUnwind);

  ctx = new (ctx_placeholder) Context;
  const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
  const char *options = GetEnv(env_name);
  CacheBinaryName();
  CheckASLR();
  InitializeFlags(&ctx->flags, options, env_name);
  AvoidCVE_2016_2143();
  __sanitizer::InitializePlatformEarly();
  __tsan::InitializePlatformEarly();

#if !SANITIZER_GO
  InitializeAllocator();
  ReplaceSystemMalloc();
#endif
  if (common_flags()->detect_deadlocks)
    ctx->dd = DDetector::Create(flags());
  Processor *proc = ProcCreate();
  ProcWire(proc, thr);
  InitializeInterceptors();
  InitializePlatform();
  InitializeDynamicAnnotations();
#if !SANITIZER_GO
  InitializeShadowMemory();
  InitializeAllocatorLate();
  InstallDeadlySignalHandlers(TsanOnDeadlySignal);
#endif
  // Setup correct file descriptor for error reports.
  __sanitizer_set_report_path(common_flags()->log_path);
  InitializeSuppressions();
#if !SANITIZER_GO
  InitializeLibIgnore();
  Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
#endif

  VPrintf(1, "***** Running under ThreadSanitizer v3 (pid %d) *****\n",
          (int)internal_getpid());

  // Initialize thread 0.
  Tid tid = ThreadCreate(nullptr, 0, 0, true);
  CHECK_EQ(tid, kMainTid);
  ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
#if TSAN_CONTAINS_UBSAN
  __ubsan::InitAsPlugin();
#endif

#if !SANITIZER_GO
  Symbolizer::LateInitialize();
  if (InitializeMemoryProfiler() || flags()->force_background_thread)
    MaybeSpawnBackgroundThread();
#endif
  ctx->initialized = true;

  if (flags()->stop_on_start) {
    Printf("ThreadSanitizer is suspended at startup (pid %d)."
           " Call __tsan_resume().\n",
           (int)internal_getpid());
    while (__tsan_resumed == 0) {}
  }

  OnInitialize();
}

void MaybeSpawnBackgroundThread() {
  // On MIPS, TSan initialization is run before
  // __pthread_initialize_minimal_internal() is finished, so we can not spawn
  // new threads.
#if !SANITIZER_GO && !defined(__mips__)
  static atomic_uint32_t bg_thread = {};
  if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
      atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
    StartBackgroundThread();
    SetSandboxingCallback(StopBackgroundThread);
  }
#endif
}

int Finalize(ThreadState *thr) {
  bool failed = false;

#if !SANITIZER_GO
  if (common_flags()->print_module_map == 1)
    DumpProcessMap();
#endif

  if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
    internal_usleep(u64(flags()->atexit_sleep_ms) * 1000);

  {
    // Wait for pending reports.
    ScopedErrorReportLock lock;
  }

#if !SANITIZER_GO
  if (Verbosity()) AllocatorPrintStats();
#endif

  ThreadFinalize(thr);

  if (ctx->nreported) {
    failed = true;
#if !SANITIZER_GO
    Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
#else
    Printf("Found %d data race(s)\n", ctx->nreported);
#endif
  }

  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();

  failed = OnFinalize(failed);

  return failed ? common_flags()->exitcode : 0;
}

#if !SANITIZER_GO
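// pthread_atfork handlers. Before fork we detach from the slot and lock all
// runtime mutexes so that the child inherits them in a consistent state;
// reports and interceptors stay suppressed until ForkParentAfter or
// ForkChildAfter runs.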
void ForkBefore(ThreadState* thr, uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  GlobalProcessorLock();
  // Detaching from the slot makes OnUserFree skip writing to the shadow.
  // The slot will be locked so any attempts to use it will deadlock anyway.
  SlotDetach(thr);
  for (auto& slot : ctx->slots) slot.mtx.Lock();
  ctx->thread_registry.Lock();
  ctx->slot_mtx.Lock();
  ScopedErrorReportLock::Lock();
  AllocatorLock();
  // Suppress all reports in the pthread_atfork callbacks.
  // Reports will deadlock on the report_mtx.
  // We could ignore sync operations as well,
  // but so far it's unclear if it will do more good or harm.
  // Unnecessarily ignoring things can lead to false positives later.
  thr->suppress_reports++;
  // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
  // we'll assert in CheckNoLocks() unless we ignore interceptors.
  // On OS X libSystem_atfork_prepare/parent/child callbacks are called
  // after/before our callbacks and they call free.
  thr->ignore_interceptors++;
  // Disables memory write in OnUserAlloc/Free.
  thr->ignore_reads_and_writes++;

  __tsan_test_only_on_fork();
}

static void ForkAfter(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
  thr->suppress_reports--;  // Enabled in ForkBefore.
  thr->ignore_interceptors--;
  thr->ignore_reads_and_writes--;
  AllocatorUnlock();
  ScopedErrorReportLock::Unlock();
  ctx->slot_mtx.Unlock();
  ctx->thread_registry.Unlock();
  for (auto& slot : ctx->slots) slot.mtx.Unlock();
  SlotAttachAndLock(thr);
  SlotUnlock(thr);
  GlobalProcessorUnlock();
}

void ForkParentAfter(ThreadState* thr, uptr pc) { ForkAfter(thr); }

void ForkChildAfter(ThreadState* thr, uptr pc, bool start_thread) {
  ForkAfter(thr);
  u32 nthread = ctx->thread_registry.OnFork(thr->tid);
  VPrintf(1,
          "ThreadSanitizer: forked new process with pid %d,"
          " parent had %d threads\n",
          (int)internal_getpid(), (int)nthread);
  if (nthread == 1) {
    if (start_thread)
      StartBackgroundThread();
  } else {
    // We've just forked a multi-threaded process. We cannot reasonably
    // function after that (some mutexes may be locked before fork). So just
    // enable ignores for everything in the hope that we will exec soon.
    ctx->after_multithreaded_fork = true;
    thr->ignore_interceptors++;
    thr->suppress_reports++;
    ThreadIgnoreBegin(thr, pc);
    ThreadIgnoreSyncBegin(thr, pc);
  }
}
#endif

#if SANITIZER_GO
NOINLINE
void GrowShadowStack(ThreadState *thr) {
  const int sz = thr->shadow_stack_end - thr->shadow_stack;
  const int newsz = 2 * sz;
  auto *newstack = (uptr *)Alloc(newsz * sizeof(uptr));
  internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
  Free(thr->shadow_stack);
  thr->shadow_stack = newstack;
  thr->shadow_stack_pos = newstack + sz;
  thr->shadow_stack_end = newstack + newsz;
}
#endif

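// Returns a stack depot id for the current shadow stack, with pc (if
// non-zero) temporarily pushed as the topmost frame.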
StackID CurrentStackId(ThreadState *thr, uptr pc) {
#if !SANITIZER_GO
  if (!thr->is_inited)  // May happen during bootstrap.
    return kInvalidStackID;
#endif
  if (pc != 0) {
#if !SANITIZER_GO
    DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
#else
    if (thr->shadow_stack_pos == thr->shadow_stack_end)
      GrowShadowStack(thr);
#endif
    thr->shadow_stack_pos[0] = pc;
    thr->shadow_stack_pos++;
  }
  StackID id = StackDepotPut(
      StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
  if (pc != 0)
    thr->shadow_stack_pos--;
  return id;
}

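// Returns true if the current trace part still has usable space: in that
// case the position is advanced past the TracePart::kAlignment gap with
// NopEvents. Returns false if the part is genuinely full (or absent) and a
// new part must be allocated.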
static bool TraceSkipGap(ThreadState* thr) {
  Trace *trace = &thr->tctx->trace;
  Event *pos = reinterpret_cast<Event *>(atomic_load_relaxed(&thr->trace_pos));
  DCHECK_EQ(reinterpret_cast<uptr>(pos + 1) & TracePart::kAlignment, 0);
  auto *part = trace->parts.Back();
  DPrintf("#%d: TraceSwitchPart enter trace=%p parts=%p-%p pos=%p\n", thr->tid,
          trace, trace->parts.Front(), part, pos);
  if (!part)
    return false;
  // We can get here when we still have space in the current trace part.
  // The fast-path check in TraceAcquire has false positives in the middle of
  // the part. Check if we are indeed at the end of the current part or not,
  // and fill any gaps with NopEvents.
  Event* end = &part->events[TracePart::kSize];
  DCHECK_GE(pos, &part->events[0]);
  DCHECK_LE(pos, end);
  if (pos + 1 < end) {
    if ((reinterpret_cast<uptr>(pos) & TracePart::kAlignment) ==
        TracePart::kAlignment)
      *pos++ = NopEvent;
    *pos++ = NopEvent;
    DCHECK_LE(pos + 2, end);
    atomic_store_relaxed(&thr->trace_pos, reinterpret_cast<uptr>(pos));
    return true;
  }
  // We are indeed at the end.
  for (; pos < end; pos++) *pos = NopEvent;
  return false;
}

NOINLINE
void TraceSwitchPart(ThreadState* thr) {
  if (TraceSkipGap(thr))
    return;
#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    // We just need to survive till exec.
    TracePart* part = thr->tctx->trace.parts.Back();
    if (part) {
      atomic_store_relaxed(&thr->trace_pos,
                           reinterpret_cast<uptr>(&part->events[0]));
      return;
    }
  }
#endif
  TraceSwitchPartImpl(thr);
}

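// Switches the thread to a fresh trace part: allocates or recycles a part,
// replays the current call stack and held mutex set into it so that the part
// is self-contained for report restoration, and moves the thread's slot to
// the back of the slot queue.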
void TraceSwitchPartImpl(ThreadState* thr) {
  SlotLocker locker(thr, true);
  Trace* trace = &thr->tctx->trace;
  TracePart* part = TracePartAlloc(thr);
  part->trace = trace;
  thr->trace_prev_pc = 0;
  TracePart* recycle = nullptr;
  // Keep roughly half of parts local to the thread
  // (not queued into the recycle queue).
  uptr local_parts = (Trace::kMinParts + flags()->history_size + 1) / 2;
  {
    Lock lock(&trace->mtx);
    if (trace->parts.Empty())
      trace->local_head = part;
    if (trace->parts.Size() >= local_parts) {
      recycle = trace->local_head;
      trace->local_head = trace->parts.Next(recycle);
    }
    trace->parts.PushBack(part);
    atomic_store_relaxed(&thr->trace_pos,
                         reinterpret_cast<uptr>(&part->events[0]));
  }
  // Make this part self-sufficient by restoring the current stack
  // and mutex set in the beginning of the trace.
  TraceTime(thr);
  {
    // Pathologically large stacks may not fit into the part.
    // In these cases we log only a fixed number of top frames.
    const uptr kMaxFrames = 1000;
    // Check that kMaxFrames won't consume the whole part.
    static_assert(kMaxFrames < TracePart::kSize / 2, "kMaxFrames is too big");
    uptr* pos = Max(&thr->shadow_stack[0], thr->shadow_stack_pos - kMaxFrames);
    for (; pos < thr->shadow_stack_pos; pos++) {
      if (TryTraceFunc(thr, *pos))
        continue;
      CHECK(TraceSkipGap(thr));
      CHECK(TryTraceFunc(thr, *pos));
    }
  }
  for (uptr i = 0; i < thr->mset.Size(); i++) {
    MutexSet::Desc d = thr->mset.Get(i);
    for (uptr i = 0; i < d.count; i++)
      TraceMutexLock(thr, d.write ? EventType::kLock : EventType::kRLock, 0,
                     d.addr, d.stack_id);
  }
  // Callers of TraceSwitchPart expect that TraceAcquire will always succeed
  // after the call. It's possible that TryTraceFunc/TraceMutexLock above
  // filled the trace part exactly up to the TracePart::kAlignment gap
  // and the next TraceAcquire won't succeed. Skip the gap to avoid that.
  EventFunc *ev;
  if (!TraceAcquire(thr, &ev)) {
    CHECK(TraceSkipGap(thr));
    CHECK(TraceAcquire(thr, &ev));
  }
  {
    Lock lock(&ctx->slot_mtx);
    // There is a small chance that the slot may not be queued at this point.
    // This can happen if the slot has kEpochLast epoch and another thread
    // in FindSlotAndLock discovered that it's exhausted and removed it from
    // the slot queue. kEpochLast can happen in 2 cases: (1) if TraceSwitchPart
    // was called with the slot locked and epoch already at kEpochLast,
    // or (2) if we've acquired a new slot in SlotLock in the beginning
    // of the function and the slot was at kEpochLast - 1, so after the
    // increment in SlotAttachAndLock it becomes kEpochLast.
    if (ctx->slot_queue.Queued(thr->slot)) {
      ctx->slot_queue.Remove(thr->slot);
      ctx->slot_queue.PushBack(thr->slot);
    }
    if (recycle)
      ctx->trace_part_recycle.PushBack(recycle);
  }
  DPrintf("#%d: TraceSwitchPart exit parts=%p-%p pos=0x%zx\n", thr->tid,
          trace->parts.Front(), trace->parts.Back(),
          atomic_load_relaxed(&thr->trace_pos));
}

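// Memory access ignores nest: Begin/End maintain a counter and accesses are
// ignored while it is non-zero; the ignore bit mirrored into fast_state is
// what the hot memory access path checks.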
void ThreadIgnoreBegin(ThreadState* thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
  thr->ignore_reads_and_writes++;
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->fast_state.SetIgnoreBit();
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
  CHECK_GT(thr->ignore_reads_and_writes, 0);
  thr->ignore_reads_and_writes--;
  if (thr->ignore_reads_and_writes == 0) {
    thr->fast_state.ClearIgnoreBit();
#if !SANITIZER_GO
    thr->mop_ignore_set.Reset();
#endif
  }
}

#if !SANITIZER_GO
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
uptr __tsan_testonly_shadow_stack_current_size() {
  ThreadState *thr = cur_thread();
  return thr->shadow_stack_pos - thr->shadow_stack;
}
#endif

void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc) {
  DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
  thr->ignore_sync++;
  CHECK_GT(thr->ignore_sync, 0);
#if !SANITIZER_GO
  if (pc && !ctx->after_multithreaded_fork)
    thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
#endif
}

void ThreadIgnoreSyncEnd(ThreadState *thr) {
  DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
  CHECK_GT(thr->ignore_sync, 0);
  thr->ignore_sync--;
#if !SANITIZER_GO
  if (thr->ignore_sync == 0)
    thr->sync_ignore_set.Reset();
#endif
}

bool MD5Hash::operator==(const MD5Hash &other) const {
  return hash[0] == other.hash[0] && hash[1] == other.hash[1];
}

#if SANITIZER_DEBUG
void build_consistency_debug() {}
#else
void build_consistency_release() {}
#endif
}  // namespace __tsan

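// Mutex ordering table for the internal deadlock checker: for each runtime
// mutex type it lists the mutex types that may be acquired while it is held.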
#if SANITIZER_CHECK_DEADLOCKS
namespace __sanitizer {
using namespace __tsan;
MutexMeta mutex_meta[] = {
    {MutexInvalid, "Invalid", {}},
    {MutexThreadRegistry,
     "ThreadRegistry",
     {MutexTypeSlots, MutexTypeTrace, MutexTypeReport}},
    {MutexTypeReport, "Report", {MutexTypeTrace}},
    {MutexTypeSyncVar, "SyncVar", {MutexTypeReport, MutexTypeTrace}},
    {MutexTypeAnnotations, "Annotations", {}},
    {MutexTypeAtExit, "AtExit", {}},
    {MutexTypeFired, "Fired", {MutexLeaf}},
    {MutexTypeRacy, "Racy", {MutexLeaf}},
    {MutexTypeGlobalProc, "GlobalProc", {MutexTypeSlot, MutexTypeSlots}},
    {MutexTypeInternalAlloc, "InternalAlloc", {MutexLeaf}},
    {MutexTypeTrace, "Trace", {}},
    {MutexTypeSlot,
     "Slot",
     {MutexMulti, MutexTypeTrace, MutexTypeSyncVar, MutexThreadRegistry,
      MutexTypeSlots}},
    {MutexTypeSlots, "Slots", {MutexTypeTrace, MutexTypeReport}},
    {},
};

void PrintMutexPC(uptr pc) { StackTrace(&pc, 1).Print(); }

}  // namespace __sanitizer
#endif