1 //===-- tsan_interceptors_posix.cpp ---------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of ThreadSanitizer (TSan), a race detector.
10 //
11 // FIXME: move as many interceptors as possible into
12 // sanitizer_common/sanitizer_common_interceptors.inc
13 //===----------------------------------------------------------------------===//
14 
15 #include "sanitizer_common/sanitizer_atomic.h"
16 #include "sanitizer_common/sanitizer_errno.h"
17 #include "sanitizer_common/sanitizer_libc.h"
18 #include "sanitizer_common/sanitizer_linux.h"
19 #include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
20 #include "sanitizer_common/sanitizer_platform_limits_posix.h"
21 #include "sanitizer_common/sanitizer_placement_new.h"
22 #include "sanitizer_common/sanitizer_posix.h"
23 #include "sanitizer_common/sanitizer_stacktrace.h"
24 #include "sanitizer_common/sanitizer_tls_get_addr.h"
25 #include "interception/interception.h"
26 #include "tsan_interceptors.h"
27 #include "tsan_interface.h"
28 #include "tsan_platform.h"
29 #include "tsan_suppressions.h"
30 #include "tsan_rtl.h"
31 #include "tsan_mman.h"
32 #include "tsan_fd.h"
33 
34 #include <stdarg.h>
35 
36 using namespace __tsan;
37 
38 #if SANITIZER_FREEBSD || SANITIZER_MAC
39 #define stdout __stdoutp
40 #define stderr __stderrp
41 #endif
42 
43 #if SANITIZER_NETBSD
44 #define dirfd(dirp) (*(int *)(dirp))
45 #define fileno_unlocked(fp)              \
46   (((__sanitizer_FILE *)fp)->_file == -1 \
47        ? -1                              \
48        : (int)(unsigned short)(((__sanitizer_FILE *)fp)->_file))
49 
50 #define stdout ((__sanitizer_FILE*)&__sF[1])
51 #define stderr ((__sanitizer_FILE*)&__sF[2])
52 
53 #define nanosleep __nanosleep50
54 #define vfork __vfork14
55 #endif
56 
57 #ifdef __mips__
58 const int kSigCount = 129;
59 #else
60 const int kSigCount = 65;
61 #endif
62 
63 #ifdef __mips__
64 struct ucontext_t {
65   u64 opaque[768 / sizeof(u64) + 1];
66 };
67 #else
68 struct ucontext_t {
69   // The size is determined by looking at sizeof of the real ucontext_t on Linux.
70   u64 opaque[936 / sizeof(u64) + 1];
71 };
72 #endif
73 
74 #if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \
75     defined(__s390x__)
76 #define PTHREAD_ABI_BASE  "GLIBC_2.3.2"
77 #elif defined(__aarch64__) || SANITIZER_PPC64V2
78 #define PTHREAD_ABI_BASE  "GLIBC_2.17"
79 #endif
80 
81 extern "C" int pthread_attr_init(void *attr);
82 extern "C" int pthread_attr_destroy(void *attr);
83 DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
84 extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
85 extern "C" int pthread_atfork(void (*prepare)(void), void (*parent)(void),
86                               void (*child)(void));
87 extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
88 extern "C" int pthread_setspecific(unsigned key, const void *v);
89 DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
90 DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
91 DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
92 DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
93 extern "C" void *pthread_self();
94 extern "C" void _exit(int status);
95 #if !SANITIZER_NETBSD
96 extern "C" int fileno_unlocked(void *stream);
97 extern "C" int dirfd(void *dirp);
98 #endif
99 #if SANITIZER_NETBSD
100 extern __sanitizer_FILE __sF[];
101 #else
102 extern __sanitizer_FILE *stdout, *stderr;
103 #endif
104 #if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
105 const int PTHREAD_MUTEX_RECURSIVE = 1;
106 const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
107 #else
108 const int PTHREAD_MUTEX_RECURSIVE = 2;
109 const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
110 #endif
111 #if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
112 const int EPOLL_CTL_ADD = 1;
113 #endif
114 const int SIGILL = 4;
115 const int SIGTRAP = 5;
116 const int SIGABRT = 6;
117 const int SIGFPE = 8;
118 const int SIGSEGV = 11;
119 const int SIGPIPE = 13;
120 const int SIGTERM = 15;
121 #if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
122 const int SIGBUS = 10;
123 const int SIGSYS = 12;
124 #else
125 const int SIGBUS = 7;
126 const int SIGSYS = 31;
127 #endif
128 void *const MAP_FAILED = (void*)-1;
129 #if SANITIZER_NETBSD
130 const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
131 #elif !SANITIZER_MAC
132 const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
133 #endif
134 const int MAP_FIXED = 0x10;
135 typedef long long_t;
136 typedef __sanitizer::u16 mode_t;
137 
138 // From /usr/include/unistd.h
139 # define F_ULOCK 0      /* Unlock a previously locked region.  */
140 # define F_LOCK  1      /* Lock a region for exclusive use.  */
141 # define F_TLOCK 2      /* Test and lock a region for exclusive use.  */
142 # define F_TEST  3      /* Test a region for other processes locks.  */
143 
144 #if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
145 const int SA_SIGINFO = 0x40;
146 const int SIG_SETMASK = 3;
147 #elif defined(__mips__)
148 const int SA_SIGINFO = 8;
149 const int SIG_SETMASK = 3;
150 #else
151 const int SA_SIGINFO = 4;
152 const int SIG_SETMASK = 2;
153 #endif
154 
155 #define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
156   (!cur_thread_init()->is_inited)
157 
158 namespace __tsan {
159 struct SignalDesc {
160   bool armed;
161   __sanitizer_siginfo siginfo;
162   ucontext_t ctx;
163 };
164 
165 struct ThreadSignalContext {
166   int int_signal_send;
167   atomic_uintptr_t in_blocking_func;
168   SignalDesc pending_signals[kSigCount];
169   // emptyset and oldset are too big for stack.
170   __sanitizer_sigset_t emptyset;
171   __sanitizer_sigset_t oldset;
172 };
173 
174 // The sole reason tsan wraps atexit callbacks is to establish synchronization
175 // between callback setup and callback execution.
176 struct AtExitCtx {
177   void (*f)();
178   void *arg;
179 };
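// Annotation (not part of the upstream source): the synchronization mentioned
// above is established by Release(thr, pc, (uptr)ctx) at registration time in
// setup_at_exit_wrapper()/on_exit(), paired with Acquire(..., (uptr)ctx) in
// at_exit_wrapper()/cxa_at_exit_wrapper()/on_exit_wrapper(), so writes made
// before registering a callback are visible to the callback when it runs.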
180 
181 // InterceptorContext holds all global data required for interceptors.
182 // It's explicitly constructed in InitializeInterceptors with placement new
183 // and is never destroyed. This allows usage of members with non-trivial
184 // constructors and destructors.
185 struct InterceptorContext {
186   // The object is 64-byte aligned, because we want hot data to be located
187   // in a single cache line if possible (it's accessed in every interceptor).
188   ALIGNED(64) LibIgnore libignore;
189   __sanitizer_sigaction sigactions[kSigCount];
190 #if !SANITIZER_MAC && !SANITIZER_NETBSD
191   unsigned finalize_key;
192 #endif
193 
194   Mutex atexit_mu;
195   Vector<struct AtExitCtx *> AtExitStack;
196 
197   InterceptorContext() : libignore(LINKER_INITIALIZED), atexit_mu(MutexTypeAtExit), AtExitStack() {}
198 };
199 
200 static ALIGNED(64) char interceptor_placeholder[sizeof(InterceptorContext)];
201 InterceptorContext *interceptor_ctx() {
202   return reinterpret_cast<InterceptorContext*>(&interceptor_placeholder[0]);
203 }
204 
205 LibIgnore *libignore() {
206   return &interceptor_ctx()->libignore;
207 }
208 
209 void InitializeLibIgnore() {
210   const SuppressionContext &supp = *Suppressions();
211   const uptr n = supp.SuppressionCount();
212   for (uptr i = 0; i < n; i++) {
213     const Suppression *s = supp.SuppressionAt(i);
214     if (0 == internal_strcmp(s->type, kSuppressionLib))
215       libignore()->AddIgnoredLibrary(s->templ);
216   }
217   if (flags()->ignore_noninstrumented_modules)
218     libignore()->IgnoreNoninstrumentedModules(true);
219   libignore()->OnLibraryLoaded(0);
220 }
221 
222 // The following two hooks can be used for cooperative scheduling when
223 // locking.
224 #ifdef TSAN_EXTERNAL_HOOKS
225 void OnPotentiallyBlockingRegionBegin();
226 void OnPotentiallyBlockingRegionEnd();
227 #else
228 SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionBegin() {}
229 SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionEnd() {}
230 #endif
231 
232 }  // namespace __tsan
233 
234 static ThreadSignalContext *SigCtx(ThreadState *thr) {
235   ThreadSignalContext *ctx = (ThreadSignalContext*)thr->signal_ctx;
236   if (ctx == 0 && !thr->is_dead) {
237     ctx = (ThreadSignalContext*)MmapOrDie(sizeof(*ctx), "ThreadSignalContext");
238     MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
239     thr->signal_ctx = ctx;
240   }
241   return ctx;
242 }
243 
244 ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
245                                      uptr pc)
246     : thr_(thr), in_ignored_lib_(false), ignoring_(false) {
247   LazyInitialize(thr);
248   if (!thr_->is_inited) return;
249   if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
250   DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
251   ignoring_ =
252       !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
253                                 libignore()->IsIgnored(pc, &in_ignored_lib_));
254   EnableIgnores();
255 }
256 
257 ScopedInterceptor::~ScopedInterceptor() {
258   if (!thr_->is_inited) return;
259   DisableIgnores();
260   if (!thr_->ignore_interceptors) {
261     ProcessPendingSignals(thr_);
262     FuncExit(thr_);
263     CheckedMutex::CheckNoLocks();
264   }
265 }
266 
267 NOINLINE
268 void ScopedInterceptor::EnableIgnoresImpl() {
269   ThreadIgnoreBegin(thr_, 0);
270   if (flags()->ignore_noninstrumented_modules)
271     thr_->suppress_reports++;
272   if (in_ignored_lib_) {
273     DCHECK(!thr_->in_ignored_lib);
274     thr_->in_ignored_lib = true;
275   }
276 }
277 
278 NOINLINE
279 void ScopedInterceptor::DisableIgnoresImpl() {
280   ThreadIgnoreEnd(thr_);
281   if (flags()->ignore_noninstrumented_modules)
282     thr_->suppress_reports--;
283   if (in_ignored_lib_) {
284     DCHECK(thr_->in_ignored_lib);
285     thr_->in_ignored_lib = false;
286   }
287 }
288 
289 #define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
290 #if SANITIZER_FREEBSD
291 # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
292 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
293 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
294 #elif SANITIZER_NETBSD
295 # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
296 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
297          INTERCEPT_FUNCTION(__libc_##func)
298 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
299          INTERCEPT_FUNCTION(__libc_thr_##func)
300 #else
301 # define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
302 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
303 # define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
304 #endif
305 
306 #define READ_STRING_OF_LEN(thr, pc, s, len, n)                 \
307   MemoryAccessRange((thr), (pc), (uptr)(s),                         \
308     common_flags()->strict_string_checks ? (len) + 1 : (n), false)
309 
310 #define READ_STRING(thr, pc, s, n)                             \
311     READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))
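// Note (added annotation): with strict_string_checks the entire string plus
// the terminating NUL (len + 1 bytes) is recorded as read; otherwise only the
// first n bytes are.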
312 
313 #define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
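// Expansion sketch (added annotation): BLOCK_REAL(sleep)(sec) becomes
//   (BlockingCall(thr), REAL(sleep))(sec)
// The comma operator constructs a temporary BlockingCall that enables
// synchronous signal delivery and is destroyed only at the end of the full
// expression, i.e. after the real blocking call returns.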
314 
315 struct BlockingCall {
316   explicit BlockingCall(ThreadState *thr)
317       : thr(thr)
318       , ctx(SigCtx(thr)) {
319     for (;;) {
320       atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
321       if (atomic_load(&thr->pending_signals, memory_order_relaxed) == 0)
322         break;
323       atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
324       ProcessPendingSignals(thr);
325     }
326     // When we are in a "blocking call", we process signals asynchronously
327     // (right when they arrive). In this context we do not expect to be
328     // executing any user/runtime code. The known interceptor sequence when
329     // this is not true is: pthread_join -> munmap(stack). It's fine
330     // to ignore munmap in this case -- we handle stack shadow separately.
331     thr->ignore_interceptors++;
332   }
333 
334   ~BlockingCall() {
335     thr->ignore_interceptors--;
336     atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
337   }
338 
339   ThreadState *thr;
340   ThreadSignalContext *ctx;
341 };
342 
343 TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
344   SCOPED_TSAN_INTERCEPTOR(sleep, sec);
345   unsigned res = BLOCK_REAL(sleep)(sec);
346   AfterSleep(thr, pc);
347   return res;
348 }
349 
350 TSAN_INTERCEPTOR(int, usleep, long_t usec) {
351   SCOPED_TSAN_INTERCEPTOR(usleep, usec);
352   int res = BLOCK_REAL(usleep)(usec);
353   AfterSleep(thr, pc);
354   return res;
355 }
356 
357 TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
358   SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
359   int res = BLOCK_REAL(nanosleep)(req, rem);
360   AfterSleep(thr, pc);
361   return res;
362 }
363 
364 TSAN_INTERCEPTOR(int, pause, int fake) {
365   SCOPED_TSAN_INTERCEPTOR(pause, fake);
366   return BLOCK_REAL(pause)(fake);
367 }
368 
369 static void at_exit_wrapper() {
370   AtExitCtx *ctx;
371   {
372     // Ensure thread-safety.
373     Lock l(&interceptor_ctx()->atexit_mu);
374 
375     // Pop AtExitCtx from the top of the stack of callback functions
376     uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
377     ctx = interceptor_ctx()->AtExitStack[element];
378     interceptor_ctx()->AtExitStack.PopBack();
379   }
380 
381   Acquire(cur_thread(), (uptr)0, (uptr)ctx);
382   ((void(*)())ctx->f)();
383   Free(ctx);
384 }
385 
386 static void cxa_at_exit_wrapper(void *arg) {
387   Acquire(cur_thread(), 0, (uptr)arg);
388   AtExitCtx *ctx = (AtExitCtx*)arg;
389   ((void(*)(void *arg))ctx->f)(ctx->arg);
390   Free(ctx);
391 }
392 
393 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
394       void *arg, void *dso);
395 
396 #if !SANITIZER_ANDROID
397 TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
398   if (in_symbolizer())
399     return 0;
400   // We want to set up the atexit callback even if we are in an ignored lib
401   // or after fork.
402   SCOPED_INTERCEPTOR_RAW(atexit, f);
403   return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0);
404 }
405 #endif
406 
407 TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
408   if (in_symbolizer())
409     return 0;
410   SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
411   return setup_at_exit_wrapper(thr, pc, (void(*)())f, arg, dso);
412 }
413 
414 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
415       void *arg, void *dso) {
416   auto *ctx = New<AtExitCtx>();
417   ctx->f = f;
418   ctx->arg = arg;
419   Release(thr, pc, (uptr)ctx);
420   // Memory allocation in __cxa_atexit will race with free during exit,
421   // because we do not see synchronization around atexit callback list.
422   ThreadIgnoreBegin(thr, pc);
423   int res;
424   if (!dso) {
425     // NetBSD does not preserve the 2nd argument if dso is equal to 0
426     // Store ctx in a local stack-like structure
427 
428     // Ensure thread-safety.
429     Lock l(&interceptor_ctx()->atexit_mu);
430     // __cxa_atexit calls calloc. If we don't ignore interceptors, we will fail
431     // due to atexit_mu held on exit from the calloc interceptor.
432     ScopedIgnoreInterceptors ignore;
433 
434     res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_wrapper, 0, 0);
435     // Push AtExitCtx on the top of the stack of callback functions
436     if (!res) {
437       interceptor_ctx()->AtExitStack.PushBack(ctx);
438     }
439   } else {
440     res = REAL(__cxa_atexit)(cxa_at_exit_wrapper, ctx, dso);
441   }
442   ThreadIgnoreEnd(thr);
443   return res;
444 }
445 
446 #if !SANITIZER_MAC && !SANITIZER_NETBSD
447 static void on_exit_wrapper(int status, void *arg) {
448   ThreadState *thr = cur_thread();
449   uptr pc = 0;
450   Acquire(thr, pc, (uptr)arg);
451   AtExitCtx *ctx = (AtExitCtx*)arg;
452   ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
453   Free(ctx);
454 }
455 
456 TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
457   if (in_symbolizer())
458     return 0;
459   SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
460   auto *ctx = New<AtExitCtx>();
461   ctx->f = (void(*)())f;
462   ctx->arg = arg;
463   Release(thr, pc, (uptr)ctx);
464   // Memory allocation in __cxa_atexit will race with free during exit,
465   // because we do not see synchronization around atexit callback list.
466   ThreadIgnoreBegin(thr, pc);
467   int res = REAL(on_exit)(on_exit_wrapper, ctx);
468   ThreadIgnoreEnd(thr);
469   return res;
470 }
471 #define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
472 #else
473 #define TSAN_MAYBE_INTERCEPT_ON_EXIT
474 #endif
475 
476 // Cleanup old bufs.
477 static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
478   for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
479     JmpBuf *buf = &thr->jmp_bufs[i];
480     if (buf->sp <= sp) {
481       uptr sz = thr->jmp_bufs.Size();
482       internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf));
483       thr->jmp_bufs.PopBack();
484       i--;
485     }
486   }
487 }
488 
489 static void SetJmp(ThreadState *thr, uptr sp) {
490   if (!thr->is_inited)  // called from libc guts during bootstrap
491     return;
492   // Cleanup old bufs.
493   JmpBufGarbageCollect(thr, sp);
494   // Remember the buf.
495   JmpBuf *buf = thr->jmp_bufs.PushBack();
496   buf->sp = sp;
497   buf->shadow_stack_pos = thr->shadow_stack_pos;
498   ThreadSignalContext *sctx = SigCtx(thr);
499   buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
500   buf->in_blocking_func = sctx ?
501       atomic_load(&sctx->in_blocking_func, memory_order_relaxed) :
502       false;
503   buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
504       memory_order_relaxed);
505 }
506 
507 static void LongJmp(ThreadState *thr, uptr *env) {
508   uptr sp = ExtractLongJmpSp(env);
509   // Find the saved buf with matching sp.
510   for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
511     JmpBuf *buf = &thr->jmp_bufs[i];
512     if (buf->sp == sp) {
513       CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
514       // Unwind the stack.
515       while (thr->shadow_stack_pos > buf->shadow_stack_pos)
516         FuncExit(thr);
517       ThreadSignalContext *sctx = SigCtx(thr);
518       if (sctx) {
519         sctx->int_signal_send = buf->int_signal_send;
520         atomic_store(&sctx->in_blocking_func, buf->in_blocking_func,
521             memory_order_relaxed);
522       }
523       atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
524           memory_order_relaxed);
525       JmpBufGarbageCollect(thr, buf->sp - 1);  // do not collect buf->sp
526       return;
527     }
528   }
529   Printf("ThreadSanitizer: can't find longjmp buf\n");
530   CHECK(0);
531 }
532 
533 // FIXME: put everything below into a common extern "C" block?
534 extern "C" void __tsan_setjmp(uptr sp) { SetJmp(cur_thread_init(), sp); }
535 
536 #if SANITIZER_MAC
537 TSAN_INTERCEPTOR(int, setjmp, void *env);
538 TSAN_INTERCEPTOR(int, _setjmp, void *env);
539 TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
540 #else  // SANITIZER_MAC
541 
542 #if SANITIZER_NETBSD
543 #define setjmp_symname __setjmp14
544 #define sigsetjmp_symname __sigsetjmp14
545 #else
546 #define setjmp_symname setjmp
547 #define sigsetjmp_symname sigsetjmp
548 #endif
549 
550 #define TSAN_INTERCEPTOR_SETJMP_(x) __interceptor_ ## x
551 #define TSAN_INTERCEPTOR_SETJMP__(x) TSAN_INTERCEPTOR_SETJMP_(x)
552 #define TSAN_INTERCEPTOR_SETJMP TSAN_INTERCEPTOR_SETJMP__(setjmp_symname)
553 #define TSAN_INTERCEPTOR_SIGSETJMP TSAN_INTERCEPTOR_SETJMP__(sigsetjmp_symname)
554 
555 #define TSAN_STRING_SETJMP SANITIZER_STRINGIFY(setjmp_symname)
556 #define TSAN_STRING_SIGSETJMP SANITIZER_STRINGIFY(sigsetjmp_symname)
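// Expansion example (added annotation): on NetBSD setjmp_symname is
// __setjmp14, so TSAN_INTERCEPTOR_SETJMP expands to __interceptor___setjmp14;
// on other platforms it expands to __interceptor_setjmp.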
557 
558 // Not called.  Merely to satisfy TSAN_INTERCEPT().
559 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
560 int TSAN_INTERCEPTOR_SETJMP(void *env);
561 extern "C" int TSAN_INTERCEPTOR_SETJMP(void *env) {
562   CHECK(0);
563   return 0;
564 }
565 
566 // FIXME: any reason to have a separate declaration?
567 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
568 int __interceptor__setjmp(void *env);
569 extern "C" int __interceptor__setjmp(void *env) {
570   CHECK(0);
571   return 0;
572 }
573 
574 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
575 int TSAN_INTERCEPTOR_SIGSETJMP(void *env);
576 extern "C" int TSAN_INTERCEPTOR_SIGSETJMP(void *env) {
577   CHECK(0);
578   return 0;
579 }
580 
581 #if !SANITIZER_NETBSD
582 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
583 int __interceptor___sigsetjmp(void *env);
584 extern "C" int __interceptor___sigsetjmp(void *env) {
585   CHECK(0);
586   return 0;
587 }
588 #endif
589 
590 extern "C" int setjmp_symname(void *env);
591 extern "C" int _setjmp(void *env);
592 extern "C" int sigsetjmp_symname(void *env);
593 #if !SANITIZER_NETBSD
594 extern "C" int __sigsetjmp(void *env);
595 #endif
596 DEFINE_REAL(int, setjmp_symname, void *env)
597 DEFINE_REAL(int, _setjmp, void *env)
598 DEFINE_REAL(int, sigsetjmp_symname, void *env)
599 #if !SANITIZER_NETBSD
600 DEFINE_REAL(int, __sigsetjmp, void *env)
601 #endif
602 #endif  // SANITIZER_MAC
603 
604 #if SANITIZER_NETBSD
605 #define longjmp_symname __longjmp14
606 #define siglongjmp_symname __siglongjmp14
607 #else
608 #define longjmp_symname longjmp
609 #define siglongjmp_symname siglongjmp
610 #endif
611 
612 TSAN_INTERCEPTOR(void, longjmp_symname, uptr *env, int val) {
613   // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
614   // bad things will happen. We will jump over ScopedInterceptor dtor and can
615   // leave thr->in_ignored_lib set.
616   {
617     SCOPED_INTERCEPTOR_RAW(longjmp_symname, env, val);
618   }
619   LongJmp(cur_thread(), env);
620   REAL(longjmp_symname)(env, val);
621 }
622 
623 TSAN_INTERCEPTOR(void, siglongjmp_symname, uptr *env, int val) {
624   {
625     SCOPED_INTERCEPTOR_RAW(siglongjmp_symname, env, val);
626   }
627   LongJmp(cur_thread(), env);
628   REAL(siglongjmp_symname)(env, val);
629 }
630 
631 #if SANITIZER_NETBSD
632 TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) {
633   {
634     SCOPED_INTERCEPTOR_RAW(_longjmp, env, val);
635   }
636   LongJmp(cur_thread(), env);
637   REAL(_longjmp)(env, val);
638 }
639 #endif
640 
641 #if !SANITIZER_MAC
642 TSAN_INTERCEPTOR(void*, malloc, uptr size) {
643   if (in_symbolizer())
644     return InternalAlloc(size);
645   void *p = 0;
646   {
647     SCOPED_INTERCEPTOR_RAW(malloc, size);
648     p = user_alloc(thr, pc, size);
649   }
650   invoke_malloc_hook(p, size);
651   return p;
652 }
653 
654 // In glibc<2.25, dynamic TLS blocks are allocated by __libc_memalign. Intercept
655 // __libc_memalign so that (1) we can detect races (2) free will not be called
656 // on libc internally allocated blocks.
657 TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
658   SCOPED_INTERCEPTOR_RAW(__libc_memalign, align, sz);
659   return user_memalign(thr, pc, align, sz);
660 }
661 
662 TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
663   if (in_symbolizer())
664     return InternalCalloc(size, n);
665   void *p = 0;
666   {
667     SCOPED_INTERCEPTOR_RAW(calloc, size, n);
668     p = user_calloc(thr, pc, size, n);
669   }
670   invoke_malloc_hook(p, n * size);
671   return p;
672 }
673 
674 TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
675   if (in_symbolizer())
676     return InternalRealloc(p, size);
677   if (p)
678     invoke_free_hook(p);
679   {
680     SCOPED_INTERCEPTOR_RAW(realloc, p, size);
681     p = user_realloc(thr, pc, p, size);
682   }
683   invoke_malloc_hook(p, size);
684   return p;
685 }
686 
687 TSAN_INTERCEPTOR(void*, reallocarray, void *p, uptr size, uptr n) {
688   if (in_symbolizer())
689     return InternalReallocArray(p, size, n);
690   if (p)
691     invoke_free_hook(p);
692   {
693     SCOPED_INTERCEPTOR_RAW(reallocarray, p, size, n);
694     p = user_reallocarray(thr, pc, p, size, n);
695   }
696   invoke_malloc_hook(p, size);
697   return p;
698 }
699 
700 TSAN_INTERCEPTOR(void, free, void *p) {
701   if (p == 0)
702     return;
703   if (in_symbolizer())
704     return InternalFree(p);
705   invoke_free_hook(p);
706   SCOPED_INTERCEPTOR_RAW(free, p);
707   user_free(thr, pc, p);
708 }
709 
710 TSAN_INTERCEPTOR(void, cfree, void *p) {
711   if (p == 0)
712     return;
713   if (in_symbolizer())
714     return InternalFree(p);
715   invoke_free_hook(p);
716   SCOPED_INTERCEPTOR_RAW(cfree, p);
717   user_free(thr, pc, p);
718 }
719 
720 TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
721   SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
722   return user_alloc_usable_size(p);
723 }
724 #endif
725 
726 TSAN_INTERCEPTOR(char *, strcpy, char *dst, const char *src) {
727   SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src);
728   uptr srclen = internal_strlen(src);
729   MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true);
730   MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false);
731   return REAL(strcpy)(dst, src);
732 }
733 
734 TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) {
735   SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
736   uptr srclen = internal_strnlen(src, n);
737   MemoryAccessRange(thr, pc, (uptr)dst, n, true);
738   MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false);
739   return REAL(strncpy)(dst, src, n);
740 }
741 
742 TSAN_INTERCEPTOR(char*, strdup, const char *str) {
743   SCOPED_TSAN_INTERCEPTOR(strdup, str);
744   // strdup will call malloc, so no instrumentation is required here.
745   return REAL(strdup)(str);
746 }
747 
748 // Zero out addr if it points into shadow memory and was provided as a hint
749 // only, i.e., MAP_FIXED is not set.
750 static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
751   if (*addr) {
752     if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
753       if (flags & MAP_FIXED) {
754         errno = errno_EINVAL;
755         return false;
756       } else {
757         *addr = 0;
758       }
759     }
760   }
761   return true;
762 }
763 
764 template <class Mmap>
765 static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
766                               void *addr, SIZE_T sz, int prot, int flags,
767                               int fd, OFF64_T off) {
768   if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED;
769   void *res = real_mmap(addr, sz, prot, flags, fd, off);
770   if (res != MAP_FAILED) {
771     if (!IsAppMem((uptr)res) || !IsAppMem((uptr)res + sz - 1)) {
772       Report("ThreadSanitizer: mmap at bad address: addr=%p size=%p res=%p\n",
773              addr, (void*)sz, res);
774       Die();
775     }
776     if (fd > 0) FdAccess(thr, pc, fd);
777     MemoryRangeImitateWriteOrResetRange(thr, pc, (uptr)res, sz);
778   }
779   return res;
780 }
781 
782 TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
783   SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
784   UnmapShadow(thr, (uptr)addr, sz);
785   int res = REAL(munmap)(addr, sz);
786   return res;
787 }
788 
789 #if SANITIZER_LINUX
790 TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
791   SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
792   return user_memalign(thr, pc, align, sz);
793 }
794 #define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
795 #else
796 #define TSAN_MAYBE_INTERCEPT_MEMALIGN
797 #endif
798 
799 #if !SANITIZER_MAC
800 TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
801   if (in_symbolizer())
802     return InternalAlloc(sz, nullptr, align);
803   SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
804   return user_aligned_alloc(thr, pc, align, sz);
805 }
806 
807 TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
808   if (in_symbolizer())
809     return InternalAlloc(sz, nullptr, GetPageSizeCached());
810   SCOPED_INTERCEPTOR_RAW(valloc, sz);
811   return user_valloc(thr, pc, sz);
812 }
813 #endif
814 
815 #if SANITIZER_LINUX
816 TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
817   if (in_symbolizer()) {
818     uptr PageSize = GetPageSizeCached();
819     sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
820     return InternalAlloc(sz, nullptr, PageSize);
821   }
822   SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
823   return user_pvalloc(thr, pc, sz);
824 }
825 #define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
826 #else
827 #define TSAN_MAYBE_INTERCEPT_PVALLOC
828 #endif
829 
830 #if !SANITIZER_MAC
831 TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
832   if (in_symbolizer()) {
833     void *p = InternalAlloc(sz, nullptr, align);
834     if (!p)
835       return errno_ENOMEM;
836     *memptr = p;
837     return 0;
838   }
839   SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
840   return user_posix_memalign(thr, pc, memptr, align, sz);
841 }
842 #endif
843 
844 // Both __cxa_guard_acquire and pthread_once 0-initialize
845 // the guard object. pthread_once does not have any
846 // other ABI requirements. __cxa_guard_acquire assumes
847 // that any non-0 value in the first byte means that
848 // initialization is completed. The contents of the remaining
849 // bytes are up to us.
850 constexpr u32 kGuardInit = 0;
851 constexpr u32 kGuardDone = 1;
852 constexpr u32 kGuardRunning = 1 << 16;
853 constexpr u32 kGuardWaiter = 1 << 17;
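// Guard state machine implemented by guard_acquire()/guard_release() below
// (added annotation): kGuardInit -> kGuardRunning when the first caller wins
// the CAS and runs the initializer; later callers set kGuardWaiter and block
// in FutexWait(); guard_release() stores kGuardDone and, if the waiter bit
// was set, wakes all waiters.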
854 
855 static int guard_acquire(ThreadState *thr, uptr pc, atomic_uint32_t *g,
856                          bool blocking_hooks = true) {
857   if (blocking_hooks)
858     OnPotentiallyBlockingRegionBegin();
859   auto on_exit = at_scope_exit([blocking_hooks] {
860     if (blocking_hooks)
861       OnPotentiallyBlockingRegionEnd();
862   });
863 
864   for (;;) {
865     u32 cmp = atomic_load(g, memory_order_acquire);
866     if (cmp == kGuardInit) {
867       if (atomic_compare_exchange_strong(g, &cmp, kGuardRunning,
868                                          memory_order_relaxed))
869         return 1;
870     } else if (cmp == kGuardDone) {
871       if (!thr->in_ignored_lib)
872         Acquire(thr, pc, (uptr)g);
873       return 0;
874     } else {
875       if ((cmp & kGuardWaiter) ||
876           atomic_compare_exchange_strong(g, &cmp, cmp | kGuardWaiter,
877                                          memory_order_relaxed))
878         FutexWait(g, cmp | kGuardWaiter);
879     }
880   }
881 }
882 
883 static void guard_release(ThreadState *thr, uptr pc, atomic_uint32_t *g) {
884   if (!thr->in_ignored_lib)
885     Release(thr, pc, (uptr)g);
886   u32 old = atomic_exchange(g, kGuardDone, memory_order_release);
887   if (old & kGuardWaiter)
888     FutexWake(g, 1 << 30);
889 }
890 
891 // __cxa_guard_acquire and friends need to be intercepted in a special way -
892 // regular interceptors will break statically-linked libstdc++. Linux
893 // interceptors are deliberately defined as weak functions (so that they don't
894 // cause link errors when the user defines them as well). So they silently
895 // auto-disable themselves when such a symbol is already present in the binary. If
896 // we link libstdc++ statically, it will bring its own __cxa_guard_acquire which
897 // will silently replace our interceptor.  That's why on Linux we simply export
898 // these interceptors with INTERFACE_ATTRIBUTE.
899 // On OS X, we don't support statically linking, so we just use a regular
900 // interceptor.
901 #if SANITIZER_MAC
902 #define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
903 #else
904 #define STDCXX_INTERCEPTOR(rettype, name, ...) \
905   extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
906 #endif
907 
908 // Used in thread-safe function static initialization.
909 STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
910   SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
911   return guard_acquire(thr, pc, g);
912 }
913 
914 STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
915   SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
916   guard_release(thr, pc, g);
917 }
918 
919 STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
920   SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
921   atomic_store(g, kGuardInit, memory_order_relaxed);
922 }
923 
924 namespace __tsan {
925 void DestroyThreadState() {
926   ThreadState *thr = cur_thread();
927   Processor *proc = thr->proc();
928   ThreadFinish(thr);
929   ProcUnwire(proc, thr);
930   ProcDestroy(proc);
931   DTLS_Destroy();
932   cur_thread_finalize();
933 }
934 
935 void PlatformCleanUpThreadState(ThreadState *thr) {
936   ThreadSignalContext *sctx = thr->signal_ctx;
937   if (sctx) {
938     thr->signal_ctx = 0;
939     UnmapOrDie(sctx, sizeof(*sctx));
940   }
941 }
942 }  // namespace __tsan
943 
944 #if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
945 static void thread_finalize(void *v) {
946   uptr iter = (uptr)v;
947   if (iter > 1) {
948     if (pthread_setspecific(interceptor_ctx()->finalize_key,
949         (void*)(iter - 1))) {
950       Printf("ThreadSanitizer: failed to set thread key\n");
951       Die();
952     }
953     return;
954   }
955   DestroyThreadState();
956 }
957 #endif
958 
959 
960 struct ThreadParam {
961   void* (*callback)(void *arg);
962   void *param;
963   Tid tid;
964   Semaphore created;
965   Semaphore started;
966 };
967 
968 extern "C" void *__tsan_thread_start_func(void *arg) {
969   ThreadParam *p = (ThreadParam*)arg;
970   void* (*callback)(void *arg) = p->callback;
971   void *param = p->param;
972   {
973     ThreadState *thr = cur_thread_init();
974     // Thread-local state is not initialized yet.
975     ScopedIgnoreInterceptors ignore;
976 #if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
977     ThreadIgnoreBegin(thr, 0);
978     if (pthread_setspecific(interceptor_ctx()->finalize_key,
979                             (void *)GetPthreadDestructorIterations())) {
980       Printf("ThreadSanitizer: failed to set thread key\n");
981       Die();
982     }
983     ThreadIgnoreEnd(thr);
984 #endif
985     p->created.Wait();
986     Processor *proc = ProcCreate();
987     ProcWire(proc, thr);
988     ThreadStart(thr, p->tid, GetTid(), ThreadType::Regular);
989     p->started.Post();
990   }
991   void *res = callback(param);
992   // Prevent the callback from being tail called;
993   // it mixes up stack traces.
994   volatile int foo = 42;
995   foo++;
996   return res;
997 }
998 
999 TSAN_INTERCEPTOR(int, pthread_create,
1000     void *th, void *attr, void *(*callback)(void*), void * param) {
1001   SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
1002 
1003   MaybeSpawnBackgroundThread();
1004 
1005   if (ctx->after_multithreaded_fork) {
1006     if (flags()->die_after_fork) {
1007       Report("ThreadSanitizer: starting new threads after multi-threaded "
1008           "fork is not supported. Dying (set die_after_fork=0 to override)\n");
1009       Die();
1010     } else {
1011       VPrintf(1,
1012               "ThreadSanitizer: starting new threads after multi-threaded "
1013               "fork is not supported (pid %lu). Continuing because of "
1014               "die_after_fork=0, but you are on your own\n",
1015               internal_getpid());
1016     }
1017   }
1018   __sanitizer_pthread_attr_t myattr;
1019   if (attr == 0) {
1020     pthread_attr_init(&myattr);
1021     attr = &myattr;
1022   }
1023   int detached = 0;
1024   REAL(pthread_attr_getdetachstate)(attr, &detached);
1025   AdjustStackSize(attr);
1026 
1027   ThreadParam p;
1028   p.callback = callback;
1029   p.param = param;
1030   p.tid = kMainTid;
1031   int res = -1;
1032   {
1033     // Otherwise we see false positives in pthread stack manipulation.
1034     ScopedIgnoreInterceptors ignore;
1035     ThreadIgnoreBegin(thr, pc);
1036     res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
1037     ThreadIgnoreEnd(thr);
1038   }
1039   if (res == 0) {
1040     p.tid = ThreadCreate(thr, pc, *(uptr *)th, IsStateDetached(detached));
1041     CHECK_NE(p.tid, kMainTid);
1042     // Synchronization on p.tid serves two purposes:
1043     // 1. ThreadCreate must finish before the new thread starts.
1044     //    Otherwise the new thread can call pthread_detach, but the pthread_t
1045     //    identifier is not yet registered in ThreadRegistry by ThreadCreate.
1046     // 2. ThreadStart must finish before this thread continues.
1047     //    Otherwise, this thread can call pthread_detach and reset thr->sync
1048     //    before the new thread got a chance to acquire from it in ThreadStart.
1049     p.created.Post();
1050     p.started.Wait();
1051   }
1052   if (attr == &myattr)
1053     pthread_attr_destroy(&myattr);
1054   return res;
1055 }
1056 
1057 TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
1058   SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
1059   Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1060   ThreadIgnoreBegin(thr, pc);
1061   int res = BLOCK_REAL(pthread_join)(th, ret);
1062   ThreadIgnoreEnd(thr);
1063   if (res == 0) {
1064     ThreadJoin(thr, pc, tid);
1065   }
1066   return res;
1067 }
1068 
1069 DEFINE_REAL_PTHREAD_FUNCTIONS
1070 
1071 TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
1072   SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
1073   Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1074   int res = REAL(pthread_detach)(th);
1075   if (res == 0) {
1076     ThreadDetach(thr, pc, tid);
1077   }
1078   return res;
1079 }
1080 
1081 TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
1082   {
1083     SCOPED_INTERCEPTOR_RAW(pthread_exit, retval);
1084 #if !SANITIZER_MAC && !SANITIZER_ANDROID
1085     CHECK_EQ(thr, reinterpret_cast<char *>((reinterpret_cast<uptr>(cur_thread_placeholder) + SANITIZER_CACHE_LINE_SIZE - 1) & ~static_cast<uptr>(SANITIZER_CACHE_LINE_SIZE - 1)));
1086 #endif
1087   }
1088   REAL(pthread_exit)(retval);
1089 }
1090 
1091 #if SANITIZER_LINUX
1092 TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
1093   SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
1094   Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1095   ThreadIgnoreBegin(thr, pc);
1096   int res = REAL(pthread_tryjoin_np)(th, ret);
1097   ThreadIgnoreEnd(thr);
1098   if (res == 0)
1099     ThreadJoin(thr, pc, tid);
1100   else
1101     ThreadNotJoined(thr, pc, tid, (uptr)th);
1102   return res;
1103 }
1104 
1105 TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
1106                  const struct timespec *abstime) {
1107   SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
1108   Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1109   ThreadIgnoreBegin(thr, pc);
1110   int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
1111   ThreadIgnoreEnd(thr);
1112   if (res == 0)
1113     ThreadJoin(thr, pc, tid);
1114   else
1115     ThreadNotJoined(thr, pc, tid, (uptr)th);
1116   return res;
1117 }
1118 #endif
1119 
1120 // Problem:
1121 // NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2).
1122 // pthread_cond_t has a different size in the two versions.
1123 // If we call the new REAL functions for an old pthread_cond_t, they will
1124 // corrupt memory after the pthread_cond_t (the old cond is smaller).
1125 // If we call the old REAL functions for a new pthread_cond_t, we will lose some
1126 // functionality (e.g. old functions do not support waiting against
1127 // CLOCK_REALTIME).
1128 // Proper handling would require having 2 versions of interceptors as well.
1129 // But this is messy, in particular it requires linker scripts when the
1130 // sanitizer runtime is linked into a shared library.
1131 // Instead we assume we don't have dynamic libraries built against old
1132 // pthread (2.2.5 dates back to 2002) and provide a legacy_pthread_cond flag
1133 // that allows working with old libraries (but this mode does not support
1134 // some features, e.g. pthread_condattr_getpshared).
1135 static void *init_cond(void *c, bool force = false) {
1136   // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
1137   // So we allocate additional memory on the side large enough to hold
1138   // any pthread_cond_t object. Always call new REAL functions, but pass
1139   // the aux object to them.
1140   // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
1141   // the first word of pthread_cond_t to zero.
1142   // This is all relevant only on Linux.
1143   if (!common_flags()->legacy_pthread_cond)
1144     return c;
1145   atomic_uintptr_t *p = (atomic_uintptr_t*)c;
1146   uptr cond = atomic_load(p, memory_order_acquire);
1147   if (!force && cond != 0)
1148     return (void*)cond;
1149   void *newcond = WRAP(malloc)(pthread_cond_t_sz);
1150   internal_memset(newcond, 0, pthread_cond_t_sz);
1151   if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
1152       memory_order_acq_rel))
1153     return newcond;
1154   WRAP(free)(newcond);
1155   return (void*)cond;
1156 }
1157 
1158 namespace {
1159 
1160 template <class Fn>
1161 struct CondMutexUnlockCtx {
1162   ScopedInterceptor *si;
1163   ThreadState *thr;
1164   uptr pc;
1165   void *m;
1166   void *c;
1167   const Fn &fn;
1168 
1169   int Cancel() const { return fn(); }
1170   void Unlock() const;
1171 };
1172 
1173 template <class Fn>
1174 void CondMutexUnlockCtx<Fn>::Unlock() const {
1175   // pthread_cond_wait interceptor has enabled async signal delivery
1176   // (see BlockingCall below). Disable async signals since we are running
1177   // tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
1178   // since the thread is cancelled, so we have to manually execute them
1179   // (the thread still can run some user code due to pthread_cleanup_push).
1180   ThreadSignalContext *ctx = SigCtx(thr);
1181   CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
1182   atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
1183   MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
1184   // Undo BlockingCall ctor effects.
1185   thr->ignore_interceptors--;
1186   si->~ScopedInterceptor();
1187 }
1188 }  // namespace
1189 
1190 INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
1191   void *cond = init_cond(c, true);
1192   SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
1193   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1194   return REAL(pthread_cond_init)(cond, a);
1195 }
1196 
1197 template <class Fn>
1198 int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si, const Fn &fn,
1199               void *c, void *m) {
1200   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1201   MutexUnlock(thr, pc, (uptr)m);
1202   int res = 0;
1203   // This ensures that we handle mutex lock even in case of pthread_cancel.
1204   // See test/tsan/cond_cancel.cpp.
1205   {
1206     // Enable signal delivery while the thread is blocked.
1207     BlockingCall bc(thr);
1208     CondMutexUnlockCtx<Fn> arg = {si, thr, pc, m, c, fn};
1209     res = call_pthread_cancel_with_cleanup(
1210         [](void *arg) -> int {
1211           return ((const CondMutexUnlockCtx<Fn> *)arg)->Cancel();
1212         },
1213         [](void *arg) { ((const CondMutexUnlockCtx<Fn> *)arg)->Unlock(); },
1214         &arg);
1215   }
1216   if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
1217   MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
1218   return res;
1219 }
1220 
1221 INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
1222   void *cond = init_cond(c);
1223   SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
1224   return cond_wait(
1225       thr, pc, &si, [=]() { return REAL(pthread_cond_wait)(cond, m); }, cond,
1226       m);
1227 }
1228 
1229 INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
1230   void *cond = init_cond(c);
1231   SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
1232   return cond_wait(
1233       thr, pc, &si,
1234       [=]() { return REAL(pthread_cond_timedwait)(cond, m, abstime); }, cond,
1235       m);
1236 }
1237 
1238 #if SANITIZER_LINUX
1239 INTERCEPTOR(int, pthread_cond_clockwait, void *c, void *m,
1240             __sanitizer_clockid_t clock, void *abstime) {
1241   void *cond = init_cond(c);
1242   SCOPED_TSAN_INTERCEPTOR(pthread_cond_clockwait, cond, m, clock, abstime);
1243   return cond_wait(
1244       thr, pc, &si,
1245       [=]() { return REAL(pthread_cond_clockwait)(cond, m, clock, abstime); },
1246       cond, m);
1247 }
1248 #define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT TSAN_INTERCEPT(pthread_cond_clockwait)
1249 #else
1250 #define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT
1251 #endif
1252 
1253 #if SANITIZER_MAC
1254 INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
1255             void *reltime) {
1256   void *cond = init_cond(c);
1257   SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
1258   return cond_wait(
1259       thr, pc, &si,
1260       [=]() {
1261         return REAL(pthread_cond_timedwait_relative_np)(cond, m, reltime);
1262       },
1263       cond, m);
1264 }
1265 #endif
1266 
1267 INTERCEPTOR(int, pthread_cond_signal, void *c) {
1268   void *cond = init_cond(c);
1269   SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
1270   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1271   return REAL(pthread_cond_signal)(cond);
1272 }
1273 
1274 INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
1275   void *cond = init_cond(c);
1276   SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
1277   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1278   return REAL(pthread_cond_broadcast)(cond);
1279 }
1280 
1281 INTERCEPTOR(int, pthread_cond_destroy, void *c) {
1282   void *cond = init_cond(c);
1283   SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
1284   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1285   int res = REAL(pthread_cond_destroy)(cond);
1286   if (common_flags()->legacy_pthread_cond) {
1287     // Free our aux cond and zero the pointer to not leave dangling pointers.
1288     WRAP(free)(cond);
1289     atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
1290   }
1291   return res;
1292 }
1293 
1294 TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
1295   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
1296   int res = REAL(pthread_mutex_init)(m, a);
1297   if (res == 0) {
1298     u32 flagz = 0;
1299     if (a) {
1300       int type = 0;
1301       if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
1302         if (type == PTHREAD_MUTEX_RECURSIVE ||
1303             type == PTHREAD_MUTEX_RECURSIVE_NP)
1304           flagz |= MutexFlagWriteReentrant;
1305     }
1306     MutexCreate(thr, pc, (uptr)m, flagz);
1307   }
1308   return res;
1309 }
1310 
1311 TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
1312   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
1313   int res = REAL(pthread_mutex_destroy)(m);
1314   if (res == 0 || res == errno_EBUSY) {
1315     MutexDestroy(thr, pc, (uptr)m);
1316   }
1317   return res;
1318 }
1319 
1320 TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
1321   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
1322   int res = REAL(pthread_mutex_trylock)(m);
1323   if (res == errno_EOWNERDEAD)
1324     MutexRepair(thr, pc, (uptr)m);
1325   if (res == 0 || res == errno_EOWNERDEAD)
1326     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1327   return res;
1328 }
1329 
1330 #if !SANITIZER_MAC
1331 TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
1332   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
1333   int res = REAL(pthread_mutex_timedlock)(m, abstime);
1334   if (res == 0) {
1335     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1336   }
1337   return res;
1338 }
1339 #endif
1340 
1341 #if !SANITIZER_MAC
1342 TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
1343   SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
1344   int res = REAL(pthread_spin_init)(m, pshared);
1345   if (res == 0) {
1346     MutexCreate(thr, pc, (uptr)m);
1347   }
1348   return res;
1349 }
1350 
1351 TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
1352   SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
1353   int res = REAL(pthread_spin_destroy)(m);
1354   if (res == 0) {
1355     MutexDestroy(thr, pc, (uptr)m);
1356   }
1357   return res;
1358 }
1359 
1360 TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
1361   SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
1362   MutexPreLock(thr, pc, (uptr)m);
1363   int res = REAL(pthread_spin_lock)(m);
1364   if (res == 0) {
1365     MutexPostLock(thr, pc, (uptr)m);
1366   }
1367   return res;
1368 }
1369 
1370 TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
1371   SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
1372   int res = REAL(pthread_spin_trylock)(m);
1373   if (res == 0) {
1374     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1375   }
1376   return res;
1377 }
1378 
1379 TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
1380   SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
1381   MutexUnlock(thr, pc, (uptr)m);
1382   int res = REAL(pthread_spin_unlock)(m);
1383   return res;
1384 }
1385 #endif
1386 
1387 TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
1388   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
1389   int res = REAL(pthread_rwlock_init)(m, a);
1390   if (res == 0) {
1391     MutexCreate(thr, pc, (uptr)m);
1392   }
1393   return res;
1394 }
1395 
1396 TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
1397   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
1398   int res = REAL(pthread_rwlock_destroy)(m);
1399   if (res == 0) {
1400     MutexDestroy(thr, pc, (uptr)m);
1401   }
1402   return res;
1403 }
1404 
1405 TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
1406   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
1407   MutexPreReadLock(thr, pc, (uptr)m);
1408   int res = REAL(pthread_rwlock_rdlock)(m);
1409   if (res == 0) {
1410     MutexPostReadLock(thr, pc, (uptr)m);
1411   }
1412   return res;
1413 }
1414 
1415 TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
1416   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
1417   int res = REAL(pthread_rwlock_tryrdlock)(m);
1418   if (res == 0) {
1419     MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
1420   }
1421   return res;
1422 }
1423 
1424 #if !SANITIZER_MAC
1425 TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
1426   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
1427   int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
1428   if (res == 0) {
1429     MutexPostReadLock(thr, pc, (uptr)m);
1430   }
1431   return res;
1432 }
1433 #endif
1434 
1435 TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
1436   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
1437   MutexPreLock(thr, pc, (uptr)m);
1438   int res = REAL(pthread_rwlock_wrlock)(m);
1439   if (res == 0) {
1440     MutexPostLock(thr, pc, (uptr)m);
1441   }
1442   return res;
1443 }
1444 
1445 TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
1446   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
1447   int res = REAL(pthread_rwlock_trywrlock)(m);
1448   if (res == 0) {
1449     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1450   }
1451   return res;
1452 }
1453 
1454 #if !SANITIZER_MAC
1455 TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
1456   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
1457   int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
1458   if (res == 0) {
1459     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1460   }
1461   return res;
1462 }
1463 #endif
1464 
1465 TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
1466   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
1467   MutexReadOrWriteUnlock(thr, pc, (uptr)m);
1468   int res = REAL(pthread_rwlock_unlock)(m);
1469   return res;
1470 }
1471 
1472 #if !SANITIZER_MAC
1473 TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
1474   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
1475   MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
1476   int res = REAL(pthread_barrier_init)(b, a, count);
1477   return res;
1478 }
1479 
1480 TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
1481   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
1482   MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
1483   int res = REAL(pthread_barrier_destroy)(b);
1484   return res;
1485 }
1486 
1487 TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
1488   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
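       // Each waiter releases on the barrier before blocking and acquires after
       // passing it, so everything written before the barrier happens-before
       // everything read after it in every participating thread.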
1489   Release(thr, pc, (uptr)b);
1490   MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
1491   int res = REAL(pthread_barrier_wait)(b);
1492   MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
1493   if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
1494     Acquire(thr, pc, (uptr)b);
1495   }
1496   return res;
1497 }
1498 #endif
1499 
1500 TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
1501   SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
1502   if (o == 0 || f == 0)
1503     return errno_EINVAL;
1504   atomic_uint32_t *a;
1505 
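       // tsan reuses a word inside pthread_once_t as its own guard; where that
       // word lives within the struct differs per platform, hence the cases below.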
1506   if (SANITIZER_MAC)
1507     a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
1508   else if (SANITIZER_NETBSD)
1509     a = static_cast<atomic_uint32_t*>
1510           ((void *)((char *)o + __sanitizer::pthread_mutex_t_sz));
1511   else
1512     a = static_cast<atomic_uint32_t*>(o);
1513 
1514   // Mac OS X appears to use pthread_once() where calling BlockingRegion hooks
1515   // results in crashes due to too little stack space.
1516   if (guard_acquire(thr, pc, a, !SANITIZER_MAC)) {
1517     (*f)();
1518     guard_release(thr, pc, a);
1519   }
1520   return 0;
1521 }
1522 
1523 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1524 TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
1525   SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
1526   if (fd > 0)
1527     FdAccess(thr, pc, fd);
1528   return REAL(__fxstat)(version, fd, buf);
1529 }
1530 #define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
1531 #else
1532 #define TSAN_MAYBE_INTERCEPT___FXSTAT
1533 #endif
1534 
1535 TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
1536 #if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD
1537   SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
1538   if (fd > 0)
1539     FdAccess(thr, pc, fd);
1540   return REAL(fstat)(fd, buf);
1541 #else
1542   SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
1543   if (fd > 0)
1544     FdAccess(thr, pc, fd);
1545   return REAL(__fxstat)(0, fd, buf);
1546 #endif
1547 }
1548 
1549 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1550 TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
1551   SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
1552   if (fd > 0)
1553     FdAccess(thr, pc, fd);
1554   return REAL(__fxstat64)(version, fd, buf);
1555 }
1556 #define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
1557 #else
1558 #define TSAN_MAYBE_INTERCEPT___FXSTAT64
1559 #endif
1560 
1561 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1562 TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
1563   SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
1564   if (fd > 0)
1565     FdAccess(thr, pc, fd);
1566   return REAL(__fxstat64)(0, fd, buf);
1567 }
1568 #define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
1569 #else
1570 #define TSAN_MAYBE_INTERCEPT_FSTAT64
1571 #endif
1572 
1573 TSAN_INTERCEPTOR(int, open, const char *name, int oflag, ...) {
1574   va_list ap;
1575   va_start(ap, oflag);
1576   mode_t mode = va_arg(ap, int);
1577   va_end(ap);
1578   SCOPED_TSAN_INTERCEPTOR(open, name, oflag, mode);
1579   READ_STRING(thr, pc, name, 0);
1580   int fd = REAL(open)(name, oflag, mode);
1581   if (fd >= 0)
1582     FdFileCreate(thr, pc, fd);
1583   return fd;
1584 }
1585 
1586 #if SANITIZER_LINUX
1587 TSAN_INTERCEPTOR(int, open64, const char *name, int oflag, ...) {
1588   va_list ap;
1589   va_start(ap, oflag);
1590   mode_t mode = va_arg(ap, int);
1591   va_end(ap);
1592   SCOPED_TSAN_INTERCEPTOR(open64, name, oflag, mode);
1593   READ_STRING(thr, pc, name, 0);
1594   int fd = REAL(open64)(name, oflag, mode);
1595   if (fd >= 0)
1596     FdFileCreate(thr, pc, fd);
1597   return fd;
1598 }
1599 #define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
1600 #else
1601 #define TSAN_MAYBE_INTERCEPT_OPEN64
1602 #endif
1603 
1604 TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
1605   SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
1606   READ_STRING(thr, pc, name, 0);
1607   int fd = REAL(creat)(name, mode);
1608   if (fd >= 0)
1609     FdFileCreate(thr, pc, fd);
1610   return fd;
1611 }
1612 
1613 #if SANITIZER_LINUX
1614 TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
1615   SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
1616   READ_STRING(thr, pc, name, 0);
1617   int fd = REAL(creat64)(name, mode);
1618   if (fd >= 0)
1619     FdFileCreate(thr, pc, fd);
1620   return fd;
1621 }
1622 #define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
1623 #else
1624 #define TSAN_MAYBE_INTERCEPT_CREAT64
1625 #endif
1626 
1627 TSAN_INTERCEPTOR(int, dup, int oldfd) {
1628   SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
1629   int newfd = REAL(dup)(oldfd);
1630   if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
1631     FdDup(thr, pc, oldfd, newfd, true);
1632   return newfd;
1633 }
1634 
1635 TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
1636   SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
1637   int newfd2 = REAL(dup2)(oldfd, newfd);
1638   if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1639     FdDup(thr, pc, oldfd, newfd2, false);
1640   return newfd2;
1641 }
1642 
1643 #if !SANITIZER_MAC
1644 TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
1645   SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
1646   int newfd2 = REAL(dup3)(oldfd, newfd, flags);
1647   if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1648     FdDup(thr, pc, oldfd, newfd2, false);
1649   return newfd2;
1650 }
1651 #endif
1652 
1653 #if SANITIZER_LINUX
1654 TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
1655   SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
1656   int fd = REAL(eventfd)(initval, flags);
1657   if (fd >= 0)
1658     FdEventCreate(thr, pc, fd);
1659   return fd;
1660 }
1661 #define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
1662 #else
1663 #define TSAN_MAYBE_INTERCEPT_EVENTFD
1664 #endif
1665 
1666 #if SANITIZER_LINUX
1667 TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
1668   SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags);
1669   if (fd >= 0)
1670     FdClose(thr, pc, fd);
1671   fd = REAL(signalfd)(fd, mask, flags);
1672   if (fd >= 0)
1673     FdSignalCreate(thr, pc, fd);
1674   return fd;
1675 }
1676 #define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
1677 #else
1678 #define TSAN_MAYBE_INTERCEPT_SIGNALFD
1679 #endif
1680 
1681 #if SANITIZER_LINUX
1682 TSAN_INTERCEPTOR(int, inotify_init, int fake) {
1683   SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
1684   int fd = REAL(inotify_init)(fake);
1685   if (fd >= 0)
1686     FdInotifyCreate(thr, pc, fd);
1687   return fd;
1688 }
1689 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
1690 #else
1691 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
1692 #endif
1693 
1694 #if SANITIZER_LINUX
1695 TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
1696   SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
1697   int fd = REAL(inotify_init1)(flags);
1698   if (fd >= 0)
1699     FdInotifyCreate(thr, pc, fd);
1700   return fd;
1701 }
1702 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
1703 #else
1704 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
1705 #endif
1706 
1707 TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
1708   SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
1709   int fd = REAL(socket)(domain, type, protocol);
1710   if (fd >= 0)
1711     FdSocketCreate(thr, pc, fd);
1712   return fd;
1713 }
1714 
1715 TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
1716   SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
1717   int res = REAL(socketpair)(domain, type, protocol, fd);
1718   if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
1719     FdPipeCreate(thr, pc, fd[0], fd[1]);
1720   return res;
1721 }
1722 
1723 TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
1724   SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
1725   FdSocketConnecting(thr, pc, fd);
1726   int res = REAL(connect)(fd, addr, addrlen);
1727   if (res == 0 && fd >= 0)
1728     FdSocketConnect(thr, pc, fd);
1729   return res;
1730 }
1731 
1732 TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
1733   SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
1734   int res = REAL(bind)(fd, addr, addrlen);
1735   if (fd > 0 && res == 0)
1736     FdAccess(thr, pc, fd);
1737   return res;
1738 }
1739 
1740 TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
1741   SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
1742   int res = REAL(listen)(fd, backlog);
1743   if (fd > 0 && res == 0)
1744     FdAccess(thr, pc, fd);
1745   return res;
1746 }
1747 
1748 TSAN_INTERCEPTOR(int, close, int fd) {
1749   SCOPED_TSAN_INTERCEPTOR(close, fd);
1750   if (fd >= 0)
1751     FdClose(thr, pc, fd);
1752   return REAL(close)(fd);
1753 }
1754 
1755 #if SANITIZER_LINUX
1756 TSAN_INTERCEPTOR(int, __close, int fd) {
1757   SCOPED_TSAN_INTERCEPTOR(__close, fd);
1758   if (fd >= 0)
1759     FdClose(thr, pc, fd);
1760   return REAL(__close)(fd);
1761 }
1762 #define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
1763 #else
1764 #define TSAN_MAYBE_INTERCEPT___CLOSE
1765 #endif
1766 
1767 // glibc guts
1768 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1769 TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
1770   SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
1771   int fds[64];
1772   int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
1773   for (int i = 0; i < cnt; i++) {
1774     if (fds[i] > 0)
1775       FdClose(thr, pc, fds[i]);
1776   }
1777   REAL(__res_iclose)(state, free_addr);
1778 }
1779 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
1780 #else
1781 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
1782 #endif
1783 
1784 TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
1785   SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
1786   int res = REAL(pipe)(pipefd);
1787   if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1788     FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1789   return res;
1790 }
1791 
1792 #if !SANITIZER_MAC
1793 TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
1794   SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
1795   int res = REAL(pipe2)(pipefd, flags);
1796   if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1797     FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1798   return res;
1799 }
1800 #endif
1801 
1802 TSAN_INTERCEPTOR(int, unlink, char *path) {
1803   SCOPED_TSAN_INTERCEPTOR(unlink, path);
1804   Release(thr, pc, File2addr(path));
1805   int res = REAL(unlink)(path);
1806   return res;
1807 }
1808 
1809 TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
1810   SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
1811   void *res = REAL(tmpfile)(fake);
1812   if (res) {
1813     int fd = fileno_unlocked(res);
1814     if (fd >= 0)
1815       FdFileCreate(thr, pc, fd);
1816   }
1817   return res;
1818 }
1819 
1820 #if SANITIZER_LINUX
1821 TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
1822   SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
1823   void *res = REAL(tmpfile64)(fake);
1824   if (res) {
1825     int fd = fileno_unlocked(res);
1826     if (fd >= 0)
1827       FdFileCreate(thr, pc, fd);
1828   }
1829   return res;
1830 }
1831 #define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
1832 #else
1833 #define TSAN_MAYBE_INTERCEPT_TMPFILE64
1834 #endif
1835 
1836 static void FlushStreams() {
1837   // Flushing all the streams here may freeze the process if a child thread is
1838   // performing file stream operations at the same time.
1839   REAL(fflush)(stdout);
1840   REAL(fflush)(stderr);
1841 }
1842 
1843 TSAN_INTERCEPTOR(void, abort, int fake) {
1844   SCOPED_TSAN_INTERCEPTOR(abort, fake);
1845   FlushStreams();
1846   REAL(abort)(fake);
1847 }
1848 
1849 TSAN_INTERCEPTOR(int, rmdir, char *path) {
1850   SCOPED_TSAN_INTERCEPTOR(rmdir, path);
1851   Release(thr, pc, Dir2addr(path));
1852   int res = REAL(rmdir)(path);
1853   return res;
1854 }
1855 
1856 TSAN_INTERCEPTOR(int, closedir, void *dirp) {
1857   SCOPED_TSAN_INTERCEPTOR(closedir, dirp);
1858   if (dirp) {
1859     int fd = dirfd(dirp);
1860     FdClose(thr, pc, fd);
1861   }
1862   return REAL(closedir)(dirp);
1863 }
1864 
1865 #if SANITIZER_LINUX
1866 TSAN_INTERCEPTOR(int, epoll_create, int size) {
1867   SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
1868   int fd = REAL(epoll_create)(size);
1869   if (fd >= 0)
1870     FdPollCreate(thr, pc, fd);
1871   return fd;
1872 }
1873 
1874 TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
1875   SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
1876   int fd = REAL(epoll_create1)(flags);
1877   if (fd >= 0)
1878     FdPollCreate(thr, pc, fd);
1879   return fd;
1880 }
1881 
1882 TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
1883   SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
1884   if (epfd >= 0)
1885     FdAccess(thr, pc, epfd);
1886   if (epfd >= 0 && fd >= 0)
1887     FdAccess(thr, pc, fd);
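       // EPOLL_CTL_ADD releases on the epoll fd and a successful epoll_wait/
       // epoll_pwait acquires it, so writes made before registering an fd are
       // visible to the thread that later handles its events.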
1888   if (op == EPOLL_CTL_ADD && epfd >= 0)
1889     FdRelease(thr, pc, epfd);
1890   int res = REAL(epoll_ctl)(epfd, op, fd, ev);
1891   return res;
1892 }
1893 
1894 TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
1895   SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
1896   if (epfd >= 0)
1897     FdAccess(thr, pc, epfd);
1898   int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
1899   if (res > 0 && epfd >= 0)
1900     FdAcquire(thr, pc, epfd);
1901   return res;
1902 }
1903 
1904 TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
1905                  void *sigmask) {
1906   SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask);
1907   if (epfd >= 0)
1908     FdAccess(thr, pc, epfd);
1909   int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask);
1910   if (res > 0 && epfd >= 0)
1911     FdAcquire(thr, pc, epfd);
1912   return res;
1913 }
1914 
1915 #define TSAN_MAYBE_INTERCEPT_EPOLL \
1916     TSAN_INTERCEPT(epoll_create); \
1917     TSAN_INTERCEPT(epoll_create1); \
1918     TSAN_INTERCEPT(epoll_ctl); \
1919     TSAN_INTERCEPT(epoll_wait); \
1920     TSAN_INTERCEPT(epoll_pwait)
1921 #else
1922 #define TSAN_MAYBE_INTERCEPT_EPOLL
1923 #endif
1924 
1925 // The following functions are intercepted merely to process pending signals.
1926 // If the program blocks signal X, we must deliver the signal before the function
1927 // returns. Similarly, if the program unblocks a signal (or returns from sigsuspend)
1928 // it's better to deliver the signal straight away.
1929 TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
1930   SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
1931   return REAL(sigsuspend)(mask);
1932 }
1933 
1934 TSAN_INTERCEPTOR(int, sigblock, int mask) {
1935   SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
1936   return REAL(sigblock)(mask);
1937 }
1938 
1939 TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
1940   SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
1941   return REAL(sigsetmask)(mask);
1942 }
1943 
1944 TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
1945     __sanitizer_sigset_t *oldset) {
1946   SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
1947   return REAL(pthread_sigmask)(how, set, oldset);
1948 }
1949 
1950 namespace __tsan {
1951 
1952 static void ReportErrnoSpoiling(ThreadState *thr, uptr pc) {
1953   VarSizeStackTrace stack;
1954   // StackTrace::GetNextInstructionPc(pc) is used because a return address is
1955   // expected; OutputReport() will undo this.
1956   ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
1957   ThreadRegistryLock l(&ctx->thread_registry);
1958   ScopedReport rep(ReportTypeErrnoInSignal);
1959   if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
1960     rep.AddStack(stack, true);
1961     OutputReport(thr, rep);
1962   }
1963 }
1964 
1965 static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
1966                                   int sig, __sanitizer_siginfo *info,
1967                                   void *uctx) {
1968   __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
1969   if (acquire)
1970     Acquire(thr, 0, (uptr)&sigactions[sig]);
1971   // Signals are generally asynchronous, so if we receive a signal when
1972   // ignores are enabled we should disable ignores. This is critical for sync
1973   // and interceptors, because otherwise we can miss synchronization and report
1974   // false races.
1975   int ignore_reads_and_writes = thr->ignore_reads_and_writes;
1976   int ignore_interceptors = thr->ignore_interceptors;
1977   int ignore_sync = thr->ignore_sync;
1978   // For symbolizer we only process SIGSEGVs synchronously
1979   // (bug in symbolizer or in tsan). But we want to reset
1980   // in_symbolizer to fail gracefully. Symbolizer and user code
1981   // use different memory allocators, so if we don't reset
1982   // in_symbolizer we can get memory allocated with one being
1983   // freed with another, which can cause more crashes.
1984   int in_symbolizer = thr->in_symbolizer;
1985   if (!ctx->after_multithreaded_fork) {
1986     thr->ignore_reads_and_writes = 0;
1987     thr->fast_state.ClearIgnoreBit();
1988     thr->ignore_interceptors = 0;
1989     thr->ignore_sync = 0;
1990     thr->in_symbolizer = 0;
1991   }
1992   // Ensure that the handler does not spoil errno.
1993   const int saved_errno = errno;
1994   errno = 99;
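       // 99 is an arbitrary sentinel: if errno is no longer 99 after the handler
       // returns, the handler has modified ("spoiled") errno.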
1995   // This code races with sigaction. Be careful to not read sa_sigaction twice.
1996   // Also need to remember pc for reporting before the call,
1997   // because the handler can reset it.
1998   volatile uptr pc = (sigactions[sig].sa_flags & SA_SIGINFO)
1999                          ? (uptr)sigactions[sig].sigaction
2000                          : (uptr)sigactions[sig].handler;
2001   if (pc != sig_dfl && pc != sig_ign) {
2002     // The callback can be either sa_handler or sa_sigaction.
2003     // They have different signatures, but we assume that passing
2004     // additional arguments to sa_handler works and is harmless.
2005     ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
2006   }
2007   if (!ctx->after_multithreaded_fork) {
2008     thr->ignore_reads_and_writes = ignore_reads_and_writes;
2009     if (ignore_reads_and_writes)
2010       thr->fast_state.SetIgnoreBit();
2011     thr->ignore_interceptors = ignore_interceptors;
2012     thr->ignore_sync = ignore_sync;
2013     thr->in_symbolizer = in_symbolizer;
2014   }
2015   // We do not detect errno spoiling for SIGTERM,
2016   // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
2017   // tsan reports false positive in such case.
2018   // It's difficult to properly detect this situation (reraise),
2019   // because in async signal processing case (when handler is called directly
2020   // from rtl_generic_sighandler) we have not yet received the reraised
2021   // signal; and it looks too fragile to intercept all ways to reraise a signal.
2022   if (ShouldReport(thr, ReportTypeErrnoInSignal) && !sync && sig != SIGTERM &&
2023       errno != 99)
2024     ReportErrnoSpoiling(thr, pc);
2025   errno = saved_errno;
2026 }
2027 
2028 void ProcessPendingSignalsImpl(ThreadState *thr) {
2029   atomic_store(&thr->pending_signals, 0, memory_order_relaxed);
2030   ThreadSignalContext *sctx = SigCtx(thr);
2031   if (sctx == 0)
2032     return;
2033   atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
2034   internal_sigfillset(&sctx->emptyset);
2035   int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
2036   CHECK_EQ(res, 0);
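       // All signals are blocked while the pending queue is drained so the
       // handlers invoked below cannot be interrupted by another async signal.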
2037   for (int sig = 0; sig < kSigCount; sig++) {
2038     SignalDesc *signal = &sctx->pending_signals[sig];
2039     if (signal->armed) {
2040       signal->armed = false;
2041       CallUserSignalHandler(thr, false, true, sig, &signal->siginfo,
2042                             &signal->ctx);
2043     }
2044   }
2045   res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
2046   CHECK_EQ(res, 0);
2047   atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
2048 }
2049 
2050 }  // namespace __tsan
2051 
2052 static bool is_sync_signal(ThreadSignalContext *sctx, int sig) {
2053   return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || sig == SIGTRAP ||
2054          sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
2055          // If we are sending signal to ourselves, we must process it now.
2056          // If we are sending a signal to ourselves, we must process it now.
2057 }
2058 
2059 void sighandler(int sig, __sanitizer_siginfo *info, void *ctx) {
2060   ThreadState *thr = cur_thread_init();
2061   ThreadSignalContext *sctx = SigCtx(thr);
2062   if (sig < 0 || sig >= kSigCount) {
2063     VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
2064     return;
2065   }
2066   // Don't mess with synchronous signals.
2067   const bool sync = is_sync_signal(sctx, sig);
2068   if (sync ||
2069       // If we are in blocking function, we can safely process it now
2070       // (but check if we are in a recursive interceptor,
2071       // i.e. pthread_join()->munmap()).
2072       (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
2073     atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
2074     if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
2075       atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
2076       CallUserSignalHandler(thr, sync, true, sig, info, ctx);
2077       atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
2078     } else {
2079       // Be very conservative about when we do acquire in this case.
2080       // It's unsafe to do acquire in async handlers, because ThreadState
2081       // can be in inconsistent state.
2082       // SIGSYS looks relatively safe -- it's synchronous and can actually
2083       // need some global state.
2084       bool acq = (sig == SIGSYS);
2085       CallUserSignalHandler(thr, sync, acq, sig, info, ctx);
2086     }
2087     atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
2088     return;
2089   }
2090 
2091   if (sctx == 0)
2092     return;
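       // An asynchronous signal that is not safe to run here: remember it and let
       // ProcessPendingSignals() deliver it later, e.g. when the thread leaves an
       // interceptor.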
2093   SignalDesc *signal = &sctx->pending_signals[sig];
2094   if (signal->armed == false) {
2095     signal->armed = true;
2096     internal_memcpy(&signal->siginfo, info, sizeof(*info));
2097     internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
2098     atomic_store(&thr->pending_signals, 1, memory_order_relaxed);
2099   }
2100 }
2101 
2102 TSAN_INTERCEPTOR(int, raise, int sig) {
2103   SCOPED_TSAN_INTERCEPTOR(raise, sig);
2104   ThreadSignalContext *sctx = SigCtx(thr);
2105   CHECK_NE(sctx, 0);
2106   int prev = sctx->int_signal_send;
2107   sctx->int_signal_send = sig;
2108   int res = REAL(raise)(sig);
2109   CHECK_EQ(sctx->int_signal_send, sig);
2110   sctx->int_signal_send = prev;
2111   return res;
2112 }
2113 
2114 TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
2115   SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
2116   ThreadSignalContext *sctx = SigCtx(thr);
2117   CHECK_NE(sctx, 0);
2118   int prev = sctx->int_signal_send;
2119   if (pid == (int)internal_getpid()) {
2120     sctx->int_signal_send = sig;
2121   }
2122   int res = REAL(kill)(pid, sig);
2123   if (pid == (int)internal_getpid()) {
2124     CHECK_EQ(sctx->int_signal_send, sig);
2125     sctx->int_signal_send = prev;
2126   }
2127   return res;
2128 }
2129 
2130 TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
2131   SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
2132   ThreadSignalContext *sctx = SigCtx(thr);
2133   CHECK_NE(sctx, 0);
2134   int prev = sctx->int_signal_send;
2135   if (tid == pthread_self()) {
2136     sctx->int_signal_send = sig;
2137   }
2138   int res = REAL(pthread_kill)(tid, sig);
2139   if (tid == pthread_self()) {
2140     CHECK_EQ(sctx->int_signal_send, sig);
2141     sctx->int_signal_send = prev;
2142   }
2143   return res;
2144 }
2145 
2146 TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
2147   SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz);
2148   // It's intercepted merely to process pending signals.
2149   return REAL(gettimeofday)(tv, tz);
2150 }
2151 
2152 TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
2153     void *hints, void *rv) {
2154   SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
2155   // We miss atomic synchronization in getaddrinfo,
2156   // and can report false race between malloc and free
2157   // inside of getaddrinfo. So ignore memory accesses.
2158   ThreadIgnoreBegin(thr, pc);
2159   int res = REAL(getaddrinfo)(node, service, hints, rv);
2160   ThreadIgnoreEnd(thr);
2161   return res;
2162 }
2163 
2164 TSAN_INTERCEPTOR(int, fork, int fake) {
2165   if (in_symbolizer())
2166     return REAL(fork)(fake);
2167   SCOPED_INTERCEPTOR_RAW(fork, fake);
2168   return REAL(fork)(fake);
2169 }
2170 
2171 void atfork_prepare() {
2172   if (in_symbolizer())
2173     return;
2174   ThreadState *thr = cur_thread();
2175   const uptr pc = StackTrace::GetCurrentPc();
2176   ForkBefore(thr, pc);
2177 }
2178 
2179 void atfork_parent() {
2180   if (in_symbolizer())
2181     return;
2182   ThreadState *thr = cur_thread();
2183   const uptr pc = StackTrace::GetCurrentPc();
2184   ForkParentAfter(thr, pc);
2185 }
2186 
2187 void atfork_child() {
2188   if (in_symbolizer())
2189     return;
2190   ThreadState *thr = cur_thread();
2191   const uptr pc = StackTrace::GetCurrentPc();
2192   ForkChildAfter(thr, pc, true);
2193   FdOnFork(thr, pc);
2194 }
2195 
2196 TSAN_INTERCEPTOR(int, vfork, int fake) {
2197   // Some programs (e.g. openjdk) call close for all file descriptors
2198   // in the child process. Under tsan it leads to false positives, because
2199   // address space is shared, so the parent process also thinks that
2200   // the descriptors are closed (while they are actually not).
2201   // This leads to false positives due to missed synchronization.
2202   // Strictly speaking, this is undefined behavior, because the vfork child is not
2203   // allowed to call any functions other than exec/exit. But this is what
2204   // openjdk does, so we want to handle it.
2205   // We could disable interceptors in the child process. But it's not possible
2206   // to simply intercept and wrap vfork, because vfork child is not allowed
2207   // to return from the function that calls vfork, and that's exactly what
2208   // we would do. So this would require some assembly trickery as well.
2209   // Instead we simply turn vfork into fork.
2210   return WRAP(fork)(fake);
2211 }
2212 
2213 #if SANITIZER_LINUX
2214 TSAN_INTERCEPTOR(int, clone, int (*fn)(void *), void *stack, int flags,
2215                  void *arg, int *parent_tid, void *tls, pid_t *child_tid) {
2216   SCOPED_INTERCEPTOR_RAW(clone, fn, stack, flags, arg, parent_tid, tls,
2217                          child_tid);
2218   struct Arg {
2219     int (*fn)(void *);
2220     void *arg;
2221   };
2222   auto wrapper = +[](void *p) -> int {
2223     auto *thr = cur_thread();
2224     uptr pc = GET_CURRENT_PC();
2225     // Start the background thread for fork, but not for clone.
2226     // For fork we did this always and it's known to work (or user code has
2227     // adopted). But if we do this for the new clone interceptor some code
2228   // (sandbox2) fails. So keep the model we used for years and don't start the
2229     // background thread after clone.
2230     ForkChildAfter(thr, pc, false);
2231     FdOnFork(thr, pc);
2232     auto *arg = static_cast<Arg *>(p);
2233     return arg->fn(arg->arg);
2234   };
2235   ForkBefore(thr, pc);
2236   Arg arg_wrapper = {fn, arg};
2237   int pid = REAL(clone)(wrapper, stack, flags, &arg_wrapper, parent_tid, tls,
2238                         child_tid);
2239   ForkParentAfter(thr, pc);
2240   return pid;
2241 }
2242 #endif
2243 
2244 #if !SANITIZER_MAC && !SANITIZER_ANDROID
2245 typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
2246                                     void *data);
2247 struct dl_iterate_phdr_data {
2248   ThreadState *thr;
2249   uptr pc;
2250   dl_iterate_phdr_cb_t cb;
2251   void *data;
2252 };
2253 
2254 static bool IsAppNotRodata(uptr addr) {
2255   return IsAppMem(addr) && *MemToShadow(addr) != kShadowRodata;
2256 }
2257 
2258 static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
2259                               void *data) {
2260   dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
2261   // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is later
2262   // accessible in dl_iterate_phdr callback. But we don't see synchronization
2263   // inside of dynamic linker, so we "unpoison" it here in order to not
2264   // produce false reports. Ignoring malloc/free in dlopen/dlclose is not enough
2265   // because some libc functions call __libc_dlopen.
2266   if (info && IsAppNotRodata((uptr)info->dlpi_name))
2267     MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2268                      internal_strlen(info->dlpi_name));
2269   int res = cbdata->cb(info, size, cbdata->data);
2270   // Perform the check one more time in case info->dlpi_name was overwritten
2271   // by user callback.
2272   if (info && IsAppNotRodata((uptr)info->dlpi_name))
2273     MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2274                      internal_strlen(info->dlpi_name));
2275   return res;
2276 }
2277 
2278 TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) {
2279   SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data);
2280   dl_iterate_phdr_data cbdata;
2281   cbdata.thr = thr;
2282   cbdata.pc = pc;
2283   cbdata.cb = cb;
2284   cbdata.data = data;
2285   int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata);
2286   return res;
2287 }
2288 #endif
2289 
2290 static int OnExit(ThreadState *thr) {
2291   int status = Finalize(thr);
2292   FlushStreams();
2293   return status;
2294 }
2295 
2296 struct TsanInterceptorContext {
2297   ThreadState *thr;
2298   const uptr pc;
2299 };
2300 
2301 #if !SANITIZER_MAC
2302 static void HandleRecvmsg(ThreadState *thr, uptr pc,
2303     __sanitizer_msghdr *msg) {
2304   int fds[64];
2305   int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
2306   for (int i = 0; i < cnt; i++)
2307     FdEventCreate(thr, pc, fds[i]);
2308 }
2309 #endif
2310 
2311 #include "sanitizer_common/sanitizer_platform_interceptors.h"
2312 // Causes interceptor recursion (getaddrinfo() and fopen())
2313 #undef SANITIZER_INTERCEPT_GETADDRINFO
2314 // We define our own.
2315 #if SANITIZER_INTERCEPT_TLS_GET_ADDR
2316 #define NEED_TLS_GET_ADDR
2317 #endif
2318 #undef SANITIZER_INTERCEPT_TLS_GET_ADDR
2319 #define SANITIZER_INTERCEPT_TLS_GET_OFFSET 1
2320 #undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK
2321 
2322 #define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
2323 #define COMMON_INTERCEPT_FUNCTION_VER(name, ver)                          \
2324   INTERCEPT_FUNCTION_VER(name, ver)
2325 #define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
2326   (INTERCEPT_FUNCTION_VER(name, ver) || INTERCEPT_FUNCTION(name))
2327 
2328 #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size)                    \
2329   MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr,                 \
2330                     ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \
2331                     true)
2332 
2333 #define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size)                       \
2334   MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr,                  \
2335                     ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \
2336                     false)
2337 
2338 #define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
2339   SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__);    \
2340   TsanInterceptorContext _ctx = {thr, pc};       \
2341   ctx = (void *)&_ctx;                           \
2342   (void)ctx;
2343 
2344 #define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
2345   SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__);              \
2346   TsanInterceptorContext _ctx = {thr, pc};                \
2347   ctx = (void *)&_ctx;                                    \
2348   (void)ctx;
2349 
2350 #define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
2351   if (path)                                           \
2352     Acquire(thr, pc, File2addr(path));                \
2353   if (file) {                                         \
2354     int fd = fileno_unlocked(file);                   \
2355     if (fd >= 0) FdFileCreate(thr, pc, fd);           \
2356   }
2357 
2358 #define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
2359   if (file) {                                    \
2360     int fd = fileno_unlocked(file);              \
2361     if (fd >= 0) FdClose(thr, pc, fd);           \
2362   }
2363 
2364 #define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
2365   libignore()->OnLibraryLoaded(filename)
2366 
2367 #define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
2368   libignore()->OnLibraryUnloaded()
2369 
2370 #define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
2371   Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)
2372 
2373 #define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
2374   Release(((TsanInterceptorContext *) ctx)->thr, pc, u)
2375 
2376 #define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
2377   Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))
2378 
2379 #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
2380   FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2381 
2382 #define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
2383   FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2384 
2385 #define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
2386   FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2387 
2388 #define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
2389   FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd)
2390 
2391 #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
2392   ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
2393 
2394 #define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
2395   __tsan::ctx->thread_registry.SetThreadNameByUserId(thread, name)
2396 
2397 #define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
2398 
2399 #define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
2400   OnExit(((TsanInterceptorContext *) ctx)->thr)
2401 
2402 #define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) \
2403   MutexPreLock(((TsanInterceptorContext *)ctx)->thr, \
2404             ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2405 
2406 #define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) \
2407   MutexPostLock(((TsanInterceptorContext *)ctx)->thr, \
2408             ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2409 
2410 #define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \
2411   MutexUnlock(((TsanInterceptorContext *)ctx)->thr, \
2412             ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2413 
2414 #define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) \
2415   MutexRepair(((TsanInterceptorContext *)ctx)->thr, \
2416             ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2417 
2418 #define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) \
2419   MutexInvalidAccess(((TsanInterceptorContext *)ctx)->thr, \
2420                      ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
2421 
2422 #define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd,  \
2423                                      off)                                   \
2424   do {                                                                      \
2425     return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd, \
2426                             off);                                           \
2427   } while (false)
2428 
2429 #if !SANITIZER_MAC
2430 #define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
2431   HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
2432       ((TsanInterceptorContext *)ctx)->pc, msg)
2433 #endif
2434 
2435 #define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end)                           \
2436   if (TsanThread *t = GetCurrentThread()) {                                    \
2437     *begin = t->tls_begin();                                                   \
2438     *end = t->tls_end();                                                       \
2439   } else {                                                                     \
2440     *begin = *end = 0;                                                         \
2441   }
2442 
2443 #define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
2444   SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()
2445 
2446 #define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
2447   SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()
2448 
2449 #include "sanitizer_common/sanitizer_common_interceptors.inc"
2450 
2451 static int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2452                           __sanitizer_sigaction *old);
2453 static __sanitizer_sighandler_ptr signal_impl(int sig,
2454                                               __sanitizer_sighandler_ptr h);
2455 
2456 #define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signo, act, oldact) \
2457   { return sigaction_impl(signo, act, oldact); }
2458 
2459 #define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
2460   { return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); }
2461 
2462 #include "sanitizer_common/sanitizer_signal_interceptors.inc"
2463 
2464 int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2465                    __sanitizer_sigaction *old) {
2466   // Note: if we call REAL(sigaction) directly for any reason without proxying
2467   // the signal handler through sighandler, very bad things will happen.
2468   // The handler will run synchronously and corrupt tsan per-thread state.
2469   SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
2470   if (sig <= 0 || sig >= kSigCount) {
2471     errno = errno_EINVAL;
2472     return -1;
2473   }
2474   __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
2475   __sanitizer_sigaction old_stored;
2476   if (old) internal_memcpy(&old_stored, &sigactions[sig], sizeof(old_stored));
2477   __sanitizer_sigaction newact;
2478   if (act) {
2479     // Copy act into sigactions[sig].
2480     // Can't use struct copy, because compiler can emit call to memcpy.
2481     // Can't use internal_memcpy, because it copies byte-by-byte,
2482     // and signal handler reads the handler concurrently. It it can read
2483     // some bytes from old value and some bytes from new value.
2484     // Use volatile to prevent insertion of memcpy.
2485     sigactions[sig].handler =
2486         *(volatile __sanitizer_sighandler_ptr const *)&act->handler;
2487     sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags;
2488     internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
2489                     sizeof(sigactions[sig].sa_mask));
2490 #if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
2491     sigactions[sig].sa_restorer = act->sa_restorer;
2492 #endif
2493     internal_memcpy(&newact, act, sizeof(newact));
2494     internal_sigfillset(&newact.sa_mask);
2495     if ((act->sa_flags & SA_SIGINFO) ||
2496         ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl)) {
2497       newact.sa_flags |= SA_SIGINFO;
2498       newact.sigaction = sighandler;
2499     }
2500     ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
2501     act = &newact;
2502   }
2503   int res = REAL(sigaction)(sig, act, old);
2504   if (res == 0 && old && old->sigaction == sighandler)
2505     internal_memcpy(old, &old_stored, sizeof(*old));
2506   return res;
2507 }
2508 
2509 static __sanitizer_sighandler_ptr signal_impl(int sig,
2510                                               __sanitizer_sighandler_ptr h) {
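       // signal() is implemented on top of sigaction() so that both entry points
       // install the sighandler proxy and share the bookkeeping in sigactions[].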
2511   __sanitizer_sigaction act;
2512   act.handler = h;
2513   internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
2514   act.sa_flags = 0;
2515   __sanitizer_sigaction old;
2516   int res = sigaction_symname(sig, &act, &old);
2517   if (res) return (__sanitizer_sighandler_ptr)sig_err;
2518   return old.handler;
2519 }
2520 
2521 #define TSAN_SYSCALL()             \
2522   ThreadState *thr = cur_thread(); \
2523   if (thr->ignore_interceptors)    \
2524     return;                        \
2525   ScopedSyscall scoped_syscall(thr)
2526 
2527 struct ScopedSyscall {
2528   ThreadState *thr;
2529 
2530   explicit ScopedSyscall(ThreadState *thr) : thr(thr) { LazyInitialize(thr); }
2531 
2532   ~ScopedSyscall() {
2533     ProcessPendingSignals(thr);
2534   }
2535 };
2536 
2537 #if !SANITIZER_FREEBSD && !SANITIZER_MAC
2538 static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
2539   TSAN_SYSCALL();
2540   MemoryAccessRange(thr, pc, p, s, write);
2541 }
2542 
2543 static USED void syscall_acquire(uptr pc, uptr addr) {
2544   TSAN_SYSCALL();
2545   Acquire(thr, pc, addr);
2546   DPrintf("syscall_acquire(0x%zx))\n", addr);
2547 }
2548 
2549 static USED void syscall_release(uptr pc, uptr addr) {
2550   TSAN_SYSCALL();
2551   DPrintf("syscall_release(0x%zx)\n", addr);
2552   Release(thr, pc, addr);
2553 }
2554 
2555 static void syscall_fd_close(uptr pc, int fd) {
2556   TSAN_SYSCALL();
2557   FdClose(thr, pc, fd);
2558 }
2559 
2560 static USED void syscall_fd_acquire(uptr pc, int fd) {
2561   TSAN_SYSCALL();
2562   FdAcquire(thr, pc, fd);
2563   DPrintf("syscall_fd_acquire(%d)\n", fd);
2564 }
2565 
2566 static USED void syscall_fd_release(uptr pc, int fd) {
2567   TSAN_SYSCALL();
2568   DPrintf("syscall_fd_release(%d)\n", fd);
2569   FdRelease(thr, pc, fd);
2570 }
2571 
2572 static void syscall_pre_fork(uptr pc) { ForkBefore(cur_thread(), pc); }
2573 
2574 static void syscall_post_fork(uptr pc, int pid) {
2575   ThreadState *thr = cur_thread();
2576   if (pid == 0) {
2577     // child
2578     ForkChildAfter(thr, pc, true);
2579     FdOnFork(thr, pc);
2580   } else if (pid > 0) {
2581     // parent
2582     ForkParentAfter(thr, pc);
2583   } else {
2584     // error
2585     ForkParentAfter(thr, pc);
2586   }
2587 }
2588 #endif
2589 
2590 #define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
2591   syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)
2592 
2593 #define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
2594   syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)
2595 
2596 #define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
2597   do {                                       \
2598     (void)(p);                               \
2599     (void)(s);                               \
2600   } while (false)
2601 
2602 #define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
2603   do {                                        \
2604     (void)(p);                                \
2605     (void)(s);                                \
2606   } while (false)
2607 
2608 #define COMMON_SYSCALL_ACQUIRE(addr) \
2609     syscall_acquire(GET_CALLER_PC(), (uptr)(addr))
2610 
2611 #define COMMON_SYSCALL_RELEASE(addr) \
2612     syscall_release(GET_CALLER_PC(), (uptr)(addr))
2613 
2614 #define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)
2615 
2616 #define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)
2617 
2618 #define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)
2619 
2620 #define COMMON_SYSCALL_PRE_FORK() \
2621   syscall_pre_fork(GET_CALLER_PC())
2622 
2623 #define COMMON_SYSCALL_POST_FORK(res) \
2624   syscall_post_fork(GET_CALLER_PC(), res)
2625 
2626 #include "sanitizer_common/sanitizer_common_syscalls.inc"
2627 #include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
2628 
2629 #ifdef NEED_TLS_GET_ADDR
2630 
2631 static void handle_tls_addr(void *arg, void *res) {
2632   ThreadState *thr = cur_thread();
2633   if (!thr)
2634     return;
2635   DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr,
2636                                         thr->tls_addr + thr->tls_size);
2637   if (!dtv)
2638     return;
2639   // New DTLS block has been allocated.
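       // Reset its shadow so stale shadow left over from a previously unmapped
       // region does not produce false reports.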
2640   MemoryResetRange(thr, 0, dtv->beg, dtv->size);
2641 }
2642 
2643 #if !SANITIZER_S390
2644 // Define own interceptor instead of sanitizer_common's for three reasons:
2645 // 1. It must not process pending signals.
2646 //    Signal handlers may contain MOVDQA instruction (see below).
2647 // 2. It must be as simple as possible to not contain MOVDQA.
2648 // 3. Sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE which
2649 //    is empty for tsan (meant only for msan).
2650 // Note: __tls_get_addr can be called with mis-aligned stack due to:
2651 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
2652 // So the interceptor must work with a mis-aligned stack; in particular, it must
2653 // not execute MOVDQA with stack addresses.
2654 TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
2655   void *res = REAL(__tls_get_addr)(arg);
2656   handle_tls_addr(arg, res);
2657   return res;
2658 }
2659 #else // SANITIZER_S390
2660 TSAN_INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
2661   uptr res = __tls_get_offset_wrapper(arg, REAL(__tls_get_offset));
2662   char *tp = static_cast<char *>(__builtin_thread_pointer());
2663   handle_tls_addr(arg, res + tp);
2664   return res;
2665 }
2666 #endif
2667 #endif
2668 
2669 #if SANITIZER_NETBSD
2670 TSAN_INTERCEPTOR(void, _lwp_exit) {
2671   SCOPED_TSAN_INTERCEPTOR(_lwp_exit);
2672   DestroyThreadState();
2673   REAL(_lwp_exit)();
2674 }
2675 #define TSAN_MAYBE_INTERCEPT__LWP_EXIT TSAN_INTERCEPT(_lwp_exit)
2676 #else
2677 #define TSAN_MAYBE_INTERCEPT__LWP_EXIT
2678 #endif
2679 
2680 #if SANITIZER_FREEBSD
2681 TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) {
2682   SCOPED_TSAN_INTERCEPTOR(thr_exit, state);
2683   DestroyThreadState();
2684   REAL(thr_exit)(state);
2685 }
2686 #define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit)
2687 #else
2688 #define TSAN_MAYBE_INTERCEPT_THR_EXIT
2689 #endif
2690 
2691 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a)
2692 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c)
2693 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c)
2694 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m)
2695 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c)
2696 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a)
2697 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m)
2698 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m)
2699 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a)
2700 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m)
2701 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m)
2702 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m)
2703 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m)
2704 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m)
2705 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m)
2706 TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)())
2707 TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(int, sigsetmask, sigmask, int a, void *b,
2708   void *c)
2709 
2710 namespace __tsan {
2711 
2712 static void finalize(void *arg) {
2713   ThreadState *thr = cur_thread();
2714   int status = Finalize(thr);
2715   // Make sure the output is not lost.
2716   FlushStreams();
2717   if (status)
2718     Die();
2719 }
2720 
2721 #if !SANITIZER_MAC && !SANITIZER_ANDROID
2722 static void unreachable() {
2723   Report("FATAL: ThreadSanitizer: unreachable called\n");
2724   Die();
2725 }
2726 #endif
2727 
2728 // Define a default implementation since interception of libdispatch is optional.
2729 SANITIZER_WEAK_ATTRIBUTE void InitializeLibdispatchInterceptors() {}
2730 
2731 void InitializeInterceptors() {
2732 #if !SANITIZER_MAC
2733   // We need to set it up early, because functions like dlsym() can call it.
2734   REAL(memset) = internal_memset;
2735   REAL(memcpy) = internal_memcpy;
2736 #endif
2737 
2738   new(interceptor_ctx()) InterceptorContext();
2739 
2740   InitializeCommonInterceptors();
2741   InitializeSignalInterceptors();
2742   InitializeLibdispatchInterceptors();
2743 
2744 #if !SANITIZER_MAC
2745   // We can not use TSAN_INTERCEPT to get setjmp addr,
2746   // because it does &setjmp and setjmp is not present in some versions of libc.
2747   using __interception::InterceptFunction;
2748   InterceptFunction(TSAN_STRING_SETJMP, (uptr*)&REAL(setjmp_symname), 0, 0);
2749   InterceptFunction("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
2750   InterceptFunction(TSAN_STRING_SIGSETJMP, (uptr*)&REAL(sigsetjmp_symname), 0,
2751                     0);
2752 #if !SANITIZER_NETBSD
2753   InterceptFunction("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
2754 #endif
2755 #endif
2756 
2757   TSAN_INTERCEPT(longjmp_symname);
2758   TSAN_INTERCEPT(siglongjmp_symname);
2759 #if SANITIZER_NETBSD
2760   TSAN_INTERCEPT(_longjmp);
2761 #endif
2762 
2763   TSAN_INTERCEPT(malloc);
2764   TSAN_INTERCEPT(__libc_memalign);
2765   TSAN_INTERCEPT(calloc);
2766   TSAN_INTERCEPT(realloc);
2767   TSAN_INTERCEPT(reallocarray);
2768   TSAN_INTERCEPT(free);
2769   TSAN_INTERCEPT(cfree);
2770   TSAN_INTERCEPT(munmap);
2771   TSAN_MAYBE_INTERCEPT_MEMALIGN;
2772   TSAN_INTERCEPT(valloc);
2773   TSAN_MAYBE_INTERCEPT_PVALLOC;
2774   TSAN_INTERCEPT(posix_memalign);
2775 
2776   TSAN_INTERCEPT(strcpy);
2777   TSAN_INTERCEPT(strncpy);
2778   TSAN_INTERCEPT(strdup);
2779 
2780   TSAN_INTERCEPT(pthread_create);
2781   TSAN_INTERCEPT(pthread_join);
2782   TSAN_INTERCEPT(pthread_detach);
2783   TSAN_INTERCEPT(pthread_exit);
2784   #if SANITIZER_LINUX
2785   TSAN_INTERCEPT(pthread_tryjoin_np);
2786   TSAN_INTERCEPT(pthread_timedjoin_np);
2787   #endif
2788 
2789   TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE);
2790   TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE);
2791   TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE);
2792   TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE);
2793   TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
2794   TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);
2795 
2796   TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT;
2797 
2798   TSAN_INTERCEPT(pthread_mutex_init);
2799   TSAN_INTERCEPT(pthread_mutex_destroy);
2800   TSAN_INTERCEPT(pthread_mutex_trylock);
2801   TSAN_INTERCEPT(pthread_mutex_timedlock);
2802 
2803   TSAN_INTERCEPT(pthread_spin_init);
2804   TSAN_INTERCEPT(pthread_spin_destroy);
2805   TSAN_INTERCEPT(pthread_spin_lock);
2806   TSAN_INTERCEPT(pthread_spin_trylock);
2807   TSAN_INTERCEPT(pthread_spin_unlock);
2808 
2809   TSAN_INTERCEPT(pthread_rwlock_init);
2810   TSAN_INTERCEPT(pthread_rwlock_destroy);
2811   TSAN_INTERCEPT(pthread_rwlock_rdlock);
2812   TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
2813   TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
2814   TSAN_INTERCEPT(pthread_rwlock_wrlock);
2815   TSAN_INTERCEPT(pthread_rwlock_trywrlock);
2816   TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
2817   TSAN_INTERCEPT(pthread_rwlock_unlock);
2818 
2819   TSAN_INTERCEPT(pthread_barrier_init);
2820   TSAN_INTERCEPT(pthread_barrier_destroy);
2821   TSAN_INTERCEPT(pthread_barrier_wait);
2822 
2823   TSAN_INTERCEPT(pthread_once);
2824 
2825   TSAN_INTERCEPT(fstat);
2826   TSAN_MAYBE_INTERCEPT___FXSTAT;
2827   TSAN_MAYBE_INTERCEPT_FSTAT64;
2828   TSAN_MAYBE_INTERCEPT___FXSTAT64;
2829   TSAN_INTERCEPT(open);
2830   TSAN_MAYBE_INTERCEPT_OPEN64;
2831   TSAN_INTERCEPT(creat);
2832   TSAN_MAYBE_INTERCEPT_CREAT64;
2833   TSAN_INTERCEPT(dup);
2834   TSAN_INTERCEPT(dup2);
2835   TSAN_INTERCEPT(dup3);
2836   TSAN_MAYBE_INTERCEPT_EVENTFD;
2837   TSAN_MAYBE_INTERCEPT_SIGNALFD;
2838   TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
2839   TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
2840   TSAN_INTERCEPT(socket);
2841   TSAN_INTERCEPT(socketpair);
2842   TSAN_INTERCEPT(connect);
2843   TSAN_INTERCEPT(bind);
2844   TSAN_INTERCEPT(listen);
2845   TSAN_MAYBE_INTERCEPT_EPOLL;
2846   TSAN_INTERCEPT(close);
2847   TSAN_MAYBE_INTERCEPT___CLOSE;
2848   TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
2849   TSAN_INTERCEPT(pipe);
2850   TSAN_INTERCEPT(pipe2);
2851 
2852   TSAN_INTERCEPT(unlink);
2853   TSAN_INTERCEPT(tmpfile);
2854   TSAN_MAYBE_INTERCEPT_TMPFILE64;
2855   TSAN_INTERCEPT(abort);
2856   TSAN_INTERCEPT(rmdir);
2857   TSAN_INTERCEPT(closedir);
2858 
2859   TSAN_INTERCEPT(sigsuspend);
2860   TSAN_INTERCEPT(sigblock);
2861   TSAN_INTERCEPT(sigsetmask);
2862   TSAN_INTERCEPT(pthread_sigmask);
2863   TSAN_INTERCEPT(raise);
2864   TSAN_INTERCEPT(kill);
2865   TSAN_INTERCEPT(pthread_kill);
2866   TSAN_INTERCEPT(sleep);
2867   TSAN_INTERCEPT(usleep);
2868   TSAN_INTERCEPT(nanosleep);
2869   TSAN_INTERCEPT(pause);
2870   TSAN_INTERCEPT(gettimeofday);
2871   TSAN_INTERCEPT(getaddrinfo);
2872 
2873   TSAN_INTERCEPT(fork);
2874   TSAN_INTERCEPT(vfork);
2875 #if SANITIZER_LINUX
2876   TSAN_INTERCEPT(clone);
2877 #endif
2878 #if !SANITIZER_ANDROID
2879   TSAN_INTERCEPT(dl_iterate_phdr);
2880 #endif
2881   TSAN_MAYBE_INTERCEPT_ON_EXIT;
2882   TSAN_INTERCEPT(__cxa_atexit);
2883   TSAN_INTERCEPT(_exit);
2884 
2885 #ifdef NEED_TLS_GET_ADDR
2886 #if !SANITIZER_S390
2887   TSAN_INTERCEPT(__tls_get_addr);
2888 #else
2889   TSAN_INTERCEPT(__tls_get_addr_internal);
2890   TSAN_INTERCEPT(__tls_get_offset);
2891 #endif
2892 #endif
2893 
2894   TSAN_MAYBE_INTERCEPT__LWP_EXIT;
2895   TSAN_MAYBE_INTERCEPT_THR_EXIT;
2896 
2897 #if !SANITIZER_MAC && !SANITIZER_ANDROID
2898   // Need to set it up, because interceptors check that the function is resolved,
2899   // but atexit is emitted directly into the module, so it cannot be resolved.
2900   REAL(atexit) = (int(*)(void(*)()))unreachable;
2901 #endif
2902 
2903   if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
2904     Printf("ThreadSanitizer: failed to setup atexit callback\n");
2905     Die();
2906   }
2907   if (pthread_atfork(atfork_prepare, atfork_parent, atfork_child)) {
2908     Printf("ThreadSanitizer: failed to setup atfork callbacks\n");
2909     Die();
2910   }
2911 
2912 #if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
2913   if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
2914     Printf("ThreadSanitizer: failed to create thread key\n");
2915     Die();
2916   }
2917 #endif
2918 
2919   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init);
2920   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal);
2921   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast);
2922   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_wait);
2923   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_destroy);
2924   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_init);
2925   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_destroy);
2926   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_trylock);
2927   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_init);
2928   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_destroy);
2929   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_rdlock);
2930   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_tryrdlock);
2931   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_wrlock);
2932   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_trywrlock);
2933   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_unlock);
2934   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(once);
2935   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(sigsetmask);
2936 
2937   FdInit();
2938 }
2939 
2940 }  // namespace __tsan
2941 
2942 // Invisible barrier for tests.
2943 // There were several unsuccessful iterations for this functionality:
2944 // 1. Initially it was implemented in user code using
2945 //    REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on
2946 //    macOS, and futexes (the obvious alternative) are Linux-specific.
2947 // 2. Then we switched to atomics+usleep(10). But usleep produced parasitic
2948 //    "as-if synchronized via sleep" messages in reports which failed some
2949 //    output tests.
2950 // 3. Then we switched to atomics+sched_yield. But this produced tons of tsan-
2951 //    visible events, which led to "failed to restore stack trace" failures.
2952 // Note that the no_sanitize_thread attribute does not turn off atomic
2953 // interception, so attaching it to a function defined in user code does not help.
2954 // That's why we now have what we have.
2955 constexpr u32 kBarrierThreadBits = 10;
2956 constexpr u32 kBarrierThreads = 1 << kBarrierThreadBits;
2957 
2958 extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(
2959     atomic_uint32_t *barrier, u32 num_threads) {
2960   if (num_threads >= kBarrierThreads) {
2961     Printf("barrier_init: count is too large (%d)\n", num_threads);
2962     Die();
2963   }
2964   // The low kBarrierThreadBits bits hold the thread count;
2965   // the remaining high bits count the threads that have entered.
2966   atomic_store(barrier, num_threads, memory_order_relaxed);
2967 }
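// A minimal sketch of how a test might drive this barrier (illustrative only;
// real tests may declare these entry points differently). The barrier orders
// the two writes in real time but adds no happens-before edge the detector
// can see, so the race is reported deterministically:
//
//   #include <pthread.h>
//
//   extern "C" void __tsan_testonly_barrier_init(void *barrier, unsigned n);
//   extern "C" void __tsan_testonly_barrier_wait(void *barrier);
//
//   unsigned barrier_storage;  // 32-bit word matching atomic_uint32_t
//   int global;
//
//   void *Thread(void *arg) {
//     __tsan_testonly_barrier_wait(&barrier_storage);
//     global = 2;  // always the second racy write in real time
//     return nullptr;
//   }
//
//   int main() {
//     __tsan_testonly_barrier_init(&barrier_storage, 2);
//     pthread_t t;
//     pthread_create(&t, nullptr, Thread, nullptr);
//     global = 1;  // always the first racy write in real time
//     __tsan_testonly_barrier_wait(&barrier_storage);
//     pthread_join(t, nullptr);
//     return 0;
//   }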
2968 
2969 static u32 barrier_epoch(u32 value) {
2970   return (value >> kBarrierThreadBits) / (value & (kBarrierThreads - 1));
2971 }
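// Worked example of the encoding: with num_threads == 2, barrier_init stores
// the value 2. Each waiter below adds kBarrierThreads (1 << 10), i.e.
// increments the "entered" field held in the high bits by one. After the
// first waiter the value is 1*1024 + 2 and the epoch is 1/2 == 0 (unchanged,
// so it blocks); after the second waiter the value is 2*1024 + 2 and the
// epoch becomes 2/2 == 1, so the epoch changes and every waiter is released.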
2972 
2973 extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
2974     atomic_uint32_t *barrier) {
2975   u32 old = atomic_fetch_add(barrier, kBarrierThreads, memory_order_relaxed);
2976   u32 old_epoch = barrier_epoch(old);
2977   if (barrier_epoch(old + kBarrierThreads) != old_epoch) {
2978     FutexWake(barrier, (1 << 30));
2979     return;
2980   }
2981   for (;;) {
2982     u32 cur = atomic_load(barrier, memory_order_relaxed);
2983     if (barrier_epoch(cur) != old_epoch)
2984       return;
2985     FutexWait(barrier, cur);
2986   }
2987 }
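// The atomics used here are the runtime's own sanitizer_atomic operations,
// not the intercepted __tsan_atomic* entry points, so waiting on this barrier
// produces no synchronization visible to the detector; only the real-time
// ordering matters, which is why memory_order_relaxed together with
// FutexWait/FutexWake is sufficient.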
2988