//===-- tsan_rtl_thread.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_placement_new.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_platform.h"
#include "tsan_report.h"
#include "tsan_sync.h"

namespace __tsan {

// ThreadContext implementation.

ThreadContext::ThreadContext(Tid tid)
    : ThreadContextBase(tid), thr(), sync(), epoch0(), epoch1() {}

#if !SANITIZER_GO
ThreadContext::~ThreadContext() {
}
#endif

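// Invoked when the thread registry resets this context so it can be reused:
// the sync clock must already be empty, and the per-thread trace memory is
// returned to the OS.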
void ThreadContext::OnReset() {
  CHECK_EQ(sync.size(), 0);
  uptr trace_p = GetThreadTrace(tid);
  ReleaseMemoryPagesToOS(trace_p, trace_p + TraceSize() * sizeof(Event));
  //!!! ReleaseMemoryToOS(GetThreadTraceHeader(tid), sizeof(Trace));
}

#if !SANITIZER_GO
struct ThreadLeak {
  ThreadContext *tctx;
  int count;
};

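// Registry callback used by ThreadFinalize below: collects threads that have
// finished but were neither joined nor detached, grouping them by creation
// stack so that identical leaks are reported once with a count.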
static void CollectThreadLeaks(ThreadContextBase *tctx_base, void *arg) {
  auto &leaks = *static_cast<Vector<ThreadLeak> *>(arg);
  auto *tctx = static_cast<ThreadContext *>(tctx_base);
  if (tctx->detached || tctx->status != ThreadStatusFinished)
    return;
  for (uptr i = 0; i < leaks.Size(); i++) {
    if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
      leaks[i].count++;
      return;
    }
  }
  leaks.PushBack({tctx, 1});
}
#endif

#if !SANITIZER_GO
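// A thread must not finish while it still has ignores enabled (an unmatched
// ThreadIgnoreBegin/ThreadIgnoreSyncBegin); print the stacks where the
// ignores were enabled and abort.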
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
  if (tctx->tid == kMainTid) {
    Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
  } else {
    Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
      " created at:\n", tctx->tid, tctx->name);
    PrintStack(SymbolizeStackId(tctx->creation_stack_id));
  }
  Printf("  One of the following ignores was not ended"
      " (in order of probability)\n");
  for (uptr i = 0; i < set->Size(); i++) {
    Printf("  Ignore was enabled at:\n");
    PrintStack(SymbolizeStackId(set->At(i)));
  }
  Die();
}

static void ThreadCheckIgnore(ThreadState *thr) {
  if (ctx->after_multithreaded_fork)
    return;
  if (thr->ignore_reads_and_writes)
    ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
  if (thr->ignore_sync)
    ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set);
}
#else
static void ThreadCheckIgnore(ThreadState *thr) {}
#endif

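// Called during finalization: checks that the finishing thread does not exit
// with ignores enabled and, unless reporting is disabled, reports
// finished-but-unjoined threads as thread leaks.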
void ThreadFinalize(ThreadState *thr) {
  ThreadCheckIgnore(thr);
#if !SANITIZER_GO
  if (!ShouldReport(thr, ReportTypeThreadLeak))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  Vector<ThreadLeak> leaks;
  ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks,
                                                      &leaks);
  for (uptr i = 0; i < leaks.Size(); i++) {
    ScopedReport rep(ReportTypeThreadLeak);
    rep.AddThread(leaks[i].tctx, true);
    rep.SetCount(leaks[i].count);
    OutputReport(thr, rep);
  }
#endif
}

int ThreadCount(ThreadState *thr) {
  uptr result;
  ctx->thread_registry.GetNumberOfThreads(0, 0, &result);
  return (int)result;
}

struct OnCreatedArgs {
  ThreadState *thr;
  uptr pc;
};

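// Registers a new thread in the thread registry under the user-level id
// `uid` (typically the pthread_t) and returns its internal tid. The
// happens-before edge from the creating thread is set up in OnCreated below.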
Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  OnCreatedArgs args = { thr, pc };
  u32 parent_tid = thr ? thr->tid : kInvalidTid;  // No parent for GCD workers.
  Tid tid = ctx->thread_registry.CreateThread(uid, detached, parent_tid, &args);
  DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent_tid, tid, uid);
  return tid;
}

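// Registry callback for ThreadCreate: releases the creating thread's clock
// into this context's sync clock (acquired later in OnStarted), so that
// everything the creator did before the create happens-before the new
// thread's first action, and records the creation stack for reports.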
void ThreadContext::OnCreated(void *arg) {
  thr = 0;
  if (tid == kMainTid)
    return;
  OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
  if (!args->thr)  // GCD workers don't have a parent thread.
    return;
  args->thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(args->thr, 0, &sync);
  creation_stack_id = CurrentStackId(args->thr, args->pc);
}

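// Empty marker function: its address is used as the PC for the imitated
// stack writes in ThreadStart below, so reports about a thread's stack show
// a recognizable __tsan_stack_initialization frame.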
extern "C" void __tsan_stack_initialization() {}

struct OnStartedArgs {
  ThreadState *thr;
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
};

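// Starts thread `tid` on the current OS thread or fiber: registers it with
// the thread registry (which constructs *thr via OnStarted), re-enables the
// post-fork ignores if needed, and imitates writes covering the new thread's
// stack and TLS so the shadow reflects that this thread now owns that memory.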
void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
                 ThreadType thread_type) {
  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
#if !SANITIZER_GO
  if (thread_type != ThreadType::Fiber)
    GetThreadStackAndTls(tid == kMainTid, &stk_addr, &stk_size, &tls_addr,
                         &tls_size);
#endif

  ThreadRegistry *tr = &ctx->thread_registry;
  OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
  tr->StartThread(tid, os_id, thread_type, &args);

  while (!thr->tctx->trace.parts.Empty()) thr->tctx->trace.parts.PopBack();

#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, 0);
    ThreadIgnoreSyncBegin(thr, 0);
  }
#endif

#if !SANITIZER_GO
  // Don't imitate stack/TLS writes for the main thread,
  // because its initialization is synchronized with all
  // subsequent threads anyway.
  if (tid != kMainTid) {
    if (stk_addr && stk_size) {
      const uptr pc = StackTrace::GetNextInstructionPc(
          reinterpret_cast<uptr>(__tsan_stack_initialization));
      MemoryRangeImitateWrite(thr, pc, stk_addr, stk_size);
    }

    if (tls_addr && tls_size)
      ImitateTlsWrite(thr, tls_addr, tls_size);
  }
#endif
}

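// Registry callback for ThreadStart: placement-constructs the ThreadState,
// starts a fresh trace part (epoch0 is rounded up to a part boundary so one
// part never mixes events from two threads), and acquires the sync clock
// released by the creator in OnCreated.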
void ThreadContext::OnStarted(void *arg) {
  OnStartedArgs *args = static_cast<OnStartedArgs *>(arg);
  thr = args->thr;
  // RoundUp so that one trace part does not contain events
  // from different threads.
  epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
  epoch1 = (u64)-1;
  new (thr)
      ThreadState(ctx, tid, unique_id, epoch0, reuse_count, args->stk_addr,
                  args->stk_size, args->tls_addr, args->tls_size);
  if (common_flags()->detect_deadlocks)
    thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
  thr->fast_state.SetHistorySize(flags()->history_size);
  // Commit switch to the new part of the trace.
  // TraceAddEvent will reset stack0/mset0 in the new part for us.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);

  thr->fast_synch_epoch = epoch0;
  AcquireImpl(thr, 0, &sync);
  sync.Reset(&thr->proc()->clock_cache);
  thr->tctx = this;
  thr->is_inited = true;
  DPrintf(
      "#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
      "tls_addr=%zx tls_size=%zx\n",
      tid, (uptr)epoch0, args->stk_addr, args->stk_size, args->tls_addr,
      args->tls_size);
}

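// Called when the thread finishes: the shadow for its stack and TLS is no
// longer needed, and FinishThread triggers OnFinished below.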
void ThreadFinish(ThreadState *thr) {
  ThreadCheckIgnore(thr);
  if (thr->stk_addr && thr->stk_size)
    DontNeedShadowFor(thr->stk_addr, thr->stk_size);
  if (thr->tls_addr && thr->tls_size)
    DontNeedShadowFor(thr->tls_addr, thr->tls_size);
  thr->is_dead = true;
  ctx->thread_registry.FinishThread(thr->tid);
}

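// Registry callback for ThreadFinish: for joinable threads, releases this
// thread's clock into the sync clock so that a later ThreadJoin (OnJoined)
// acquires it; then tears down deadlock-detector state, cached clocks and
// the ThreadState itself.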
void ThreadContext::OnFinished() {
#if SANITIZER_GO
  Free(thr->shadow_stack);
  thr->shadow_stack_pos = nullptr;
  thr->shadow_stack_end = nullptr;
#endif
  if (!detached) {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    ReleaseImpl(thr, 0, &sync);
  }
  epoch1 = thr->fast_state.epoch();

  if (common_flags()->detect_deadlocks)
    ctx->dd->DestroyLogicalThread(thr->dd_lt);
  thr->clock.ResetCached(&thr->proc()->clock_cache);
#if !SANITIZER_GO
  thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
#endif
#if !SANITIZER_GO
  PlatformCleanUpThreadState(thr);
#endif
  thr->~ThreadState();
  thr = 0;
}

struct ConsumeThreadContext {
  uptr uid;
  ThreadContextBase *tctx;
};

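// FindThread callback for ThreadConsumeTid below: locates the context whose
// user_id matches and clears the id so the same thread cannot be consumed
// (joined or detached) twice; duplicate user ids are fatal.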
static bool ConsumeThreadByUid(ThreadContextBase *tctx, void *arg) {
  ConsumeThreadContext *findCtx = (ConsumeThreadContext *)arg;
  if (tctx->user_id == findCtx->uid && tctx->status != ThreadStatusInvalid) {
    if (findCtx->tctx) {
      // Ensure that user_id is unique. If it's not the case we are screwed.
      // Something went wrong before, but now there is no way to recover.
      // Returning a wrong thread is not an option, it may lead to very hard
      // to debug false positives (e.g. if we join a wrong thread).
      Report("ThreadSanitizer: dup thread with used id 0x%zx\n", findCtx->uid);
      Die();
    }
    findCtx->tctx = tctx;
    tctx->user_id = 0;
  }
  return false;
}

Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid) {
  ConsumeThreadContext findCtx = {uid, nullptr};
  ctx->thread_registry.FindThread(ConsumeThreadByUid, &findCtx);
  Tid tid = findCtx.tctx ? findCtx.tctx->tid : kInvalidTid;
  DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, tid);
  return tid;
}

void ThreadJoin(ThreadState *thr, uptr pc, Tid tid) {
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
  ctx->thread_registry.JoinThread(tid, thr);
}

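// Registry callback for ThreadJoin: the joining thread acquires the clock
// that the finished thread released in OnFinished, so everything the joined
// thread did happens-before the join returns.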
void ThreadContext::OnJoined(void *arg) {
  ThreadState *caller_thr = static_cast<ThreadState *>(arg);
  AcquireImpl(caller_thr, 0, &sync);
  sync.Reset(&caller_thr->proc()->clock_cache);
}

void ThreadContext::OnDead() { CHECK_EQ(sync.size(), 0); }

void ThreadDetach(ThreadState *thr, uptr pc, Tid tid) {
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  ctx->thread_registry.DetachThread(tid, thr);
}

void ThreadContext::OnDetached(void *arg) {
  ThreadState *thr1 = static_cast<ThreadState *>(arg);
  sync.Reset(&thr1->proc()->clock_cache);
}

void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid) {
  CHECK_GT(tid, 0);
  CHECK_LT(tid, kMaxTid);
  ctx->thread_registry.SetThreadUserId(tid, uid);
}

void ThreadSetName(ThreadState *thr, const char *name) {
  ctx->thread_registry.SetThreadName(thr->tid, name);
}

#if !SANITIZER_GO
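// Fiber support. These runtime entry points back the public fiber interface
// (__tsan_create_fiber, __tsan_switch_to_fiber, __tsan_destroy_fiber): each
// fiber gets its own ThreadState, and FiberSwitchImpl rebinds the current
// Processor and cur_thread to the fiber being switched to.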
void FiberSwitchImpl(ThreadState *from, ThreadState *to) {
  Processor *proc = from->proc();
  ProcUnwire(proc, from);
  ProcWire(proc, to);
  set_cur_thread(to);
}

ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags) {
  void *mem = Alloc(sizeof(ThreadState));
  ThreadState *fiber = static_cast<ThreadState *>(mem);
  internal_memset(fiber, 0, sizeof(*fiber));
  Tid tid = ThreadCreate(thr, pc, 0, true);
  FiberSwitchImpl(thr, fiber);
  ThreadStart(fiber, tid, 0, ThreadType::Fiber);
  FiberSwitchImpl(fiber, thr);
  return fiber;
}

void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber) {
  FiberSwitchImpl(thr, fiber);
  ThreadFinish(fiber);
  FiberSwitchImpl(fiber, thr);
  Free(fiber);
}

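// Switches to another fiber. Unless FiberSwitchFlagNoSync is passed, a
// release/acquire pair on the fiber's address establishes a happens-before
// edge from the code that ran before the switch to the code running on the
// target fiber afterwards.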
void FiberSwitch(ThreadState *thr, uptr pc,
                 ThreadState *fiber, unsigned flags) {
  if (!(flags & FiberSwitchFlagNoSync))
    Release(thr, pc, (uptr)fiber);
  FiberSwitchImpl(thr, fiber);
  if (!(flags & FiberSwitchFlagNoSync))
    Acquire(fiber, pc, (uptr)fiber);
}
#endif

}  // namespace __tsan