xref: /llvm-project/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp (revision 9a156f6b2b0c892d8713ba907f07f027b24953d8)
1 //===-- tsan_interceptors_posix.cpp ---------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of ThreadSanitizer (TSan), a race detector.
10 //
11 // FIXME: move as many interceptors as possible into
12 // sanitizer_common/sanitizer_common_interceptors.inc
13 //===----------------------------------------------------------------------===//
14 
15 #include "sanitizer_common/sanitizer_allocator_dlsym.h"
16 #include "sanitizer_common/sanitizer_atomic.h"
17 #include "sanitizer_common/sanitizer_errno.h"
18 #include "sanitizer_common/sanitizer_glibc_version.h"
19 #include "sanitizer_common/sanitizer_internal_defs.h"
20 #include "sanitizer_common/sanitizer_libc.h"
21 #include "sanitizer_common/sanitizer_linux.h"
22 #include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
23 #include "sanitizer_common/sanitizer_platform_limits_posix.h"
24 #include "sanitizer_common/sanitizer_posix.h"
25 #include "sanitizer_common/sanitizer_stacktrace.h"
26 #include "sanitizer_common/sanitizer_tls_get_addr.h"
27 #include "interception/interception.h"
28 #include "tsan_interceptors.h"
29 #include "tsan_interface.h"
30 #include "tsan_platform.h"
31 #include "tsan_suppressions.h"
32 #include "tsan_rtl.h"
33 #include "tsan_mman.h"
34 #include "tsan_fd.h"
35 
36 #include <stdarg.h>
37 
38 using namespace __tsan;
39 
40 DECLARE_REAL(void *, memcpy, void *to, const void *from, SIZE_T size)
41 DECLARE_REAL(void *, memset, void *block, int c, SIZE_T size)
42 
43 #if SANITIZER_FREEBSD || SANITIZER_APPLE
44 #define stdout __stdoutp
45 #define stderr __stderrp
46 #endif
47 
48 #if SANITIZER_NETBSD
49 #define dirfd(dirp) (*(int *)(dirp))
50 #define fileno_unlocked(fp)              \
51   (((__sanitizer_FILE *)fp)->_file == -1 \
52        ? -1                              \
53        : (int)(unsigned short)(((__sanitizer_FILE *)fp)->_file))
54 
55 #define stdout ((__sanitizer_FILE*)&__sF[1])
56 #define stderr ((__sanitizer_FILE*)&__sF[2])
57 
58 #define nanosleep __nanosleep50
59 #define vfork __vfork14
60 #endif
61 
62 #ifdef __mips__
63 const int kSigCount = 129;
64 #else
65 const int kSigCount = 65;
66 #endif
67 
68 #ifdef __mips__
69 struct ucontext_t {
70   u64 opaque[768 / sizeof(u64) + 1];
71 };
72 #else
73 struct ucontext_t {
74   // The size is determined by looking at sizeof of the real ucontext_t on Linux.
75   u64 opaque[936 / sizeof(u64) + 1];
76 };
77 #endif
78 
79 #if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \
80     defined(__s390x__)
81 #define PTHREAD_ABI_BASE  "GLIBC_2.3.2"
82 #elif defined(__aarch64__) || SANITIZER_PPC64V2
83 #define PTHREAD_ABI_BASE  "GLIBC_2.17"
84 #elif SANITIZER_LOONGARCH64
85 #define PTHREAD_ABI_BASE  "GLIBC_2.36"
86 #elif SANITIZER_RISCV64
87 #  define PTHREAD_ABI_BASE "GLIBC_2.27"
88 #endif
89 
90 extern "C" int pthread_attr_init(void *attr);
91 extern "C" int pthread_attr_destroy(void *attr);
92 DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
93 extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
94 extern "C" int pthread_atfork(void (*prepare)(void), void (*parent)(void),
95                               void (*child)(void));
96 extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
97 extern "C" int pthread_setspecific(unsigned key, const void *v);
98 DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
99 DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
100 DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, usize size)
101 DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
102 extern "C" int pthread_equal(void *t1, void *t2);
103 extern "C" void *pthread_self();
104 extern "C" void _exit(int status);
105 #if !SANITIZER_NETBSD
106 extern "C" int fileno_unlocked(void *stream);
107 extern "C" int dirfd(void *dirp);
108 #endif
109 #if SANITIZER_NETBSD
110 extern __sanitizer_FILE __sF[];
111 #else
112 extern __sanitizer_FILE *stdout, *stderr;
113 #endif
114 #if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
115 const int PTHREAD_MUTEX_RECURSIVE = 1;
116 const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
117 #else
118 const int PTHREAD_MUTEX_RECURSIVE = 2;
119 const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
120 #endif
121 #if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
122 const int EPOLL_CTL_ADD = 1;
123 #endif
124 const int SIGILL = 4;
125 const int SIGTRAP = 5;
126 const int SIGABRT = 6;
127 const int SIGFPE = 8;
128 const int SIGSEGV = 11;
129 const int SIGPIPE = 13;
130 const int SIGTERM = 15;
131 #if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
132 const int SIGBUS = 10;
133 const int SIGSYS = 12;
134 #else
135 const int SIGBUS = 7;
136 const int SIGSYS = 31;
137 #endif
138 #if SANITIZER_HAS_SIGINFO
139 const int SI_TIMER = -2;
140 #endif
141 void *const MAP_FAILED = (void*)-1;
142 #if SANITIZER_NETBSD
143 const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
144 #elif !SANITIZER_APPLE
145 const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
146 #endif
147 const int MAP_FIXED = 0x10;
148 typedef long long_t;
149 typedef __sanitizer::u16 mode_t;
150 
151 // From /usr/include/unistd.h
152 # define F_ULOCK 0      /* Unlock a previously locked region.  */
153 # define F_LOCK  1      /* Lock a region for exclusive use.  */
154 # define F_TLOCK 2      /* Test and lock a region for exclusive use.  */
155 # define F_TEST  3      /* Test a region for other processes locks.  */
156 
157 #if SANITIZER_FREEBSD || SANITIZER_APPLE || SANITIZER_NETBSD
158 const int SA_SIGINFO = 0x40;
159 const int SIG_SETMASK = 3;
160 #elif defined(__mips__)
161 const int SA_SIGINFO = 8;
162 const int SIG_SETMASK = 3;
163 #else
164 const int SA_SIGINFO = 4;
165 const int SIG_SETMASK = 2;
166 #endif
167 
168 namespace __tsan {
169 struct SignalDesc {
170   bool armed;
171   __sanitizer_siginfo siginfo;
172   ucontext_t ctx;
173 };
174 
175 struct ThreadSignalContext {
176   int int_signal_send;
177   SignalDesc pending_signals[kSigCount];
178   // emptyset and oldset are too big for stack.
179   __sanitizer_sigset_t emptyset;
180   __sanitizer_sigset_t oldset;
181 };
182 
183 void EnterBlockingFunc(ThreadState *thr) {
184   for (;;) {
185     // The order is important so as not to delay a signal infinitely if it's
186     // delivered right before we set in_blocking_func. Note: we can't call
187     // ProcessPendingSignals while in_blocking_func is set, or we could end up
188     // handling a signal synchronously while we are already handling a signal.
189     atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
190     if (atomic_load(&thr->pending_signals, memory_order_relaxed) == 0)
191       break;
192     atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
193     ProcessPendingSignals(thr);
194   }
195 }
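// A sketch of the race the comment above guards against (illustration only;
// the handler-side behavior is as described here and in BlockingCall below).
// With the wrong order -- check first, then publish -- a signal can be queued
// in the window and then sit undelivered for the whole blocking call:
//
//   this thread                            signal handler
//   -----------                            --------------
//   load pending_signals == 0
//                                          sees in_blocking_func == 0,
//                                          so it only queues the signal
//   store in_blocking_func = 1
//   enter blocking syscall                 (queued signal is stuck until
//                                           the syscall returns)
//
// Storing in_blocking_func first and then re-checking pending_signals in a
// loop closes this window.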
196 
197 // The sole reason tsan wraps atexit callbacks is to establish synchronization
198 // between callback setup and callback execution.
199 struct AtExitCtx {
200   void (*f)();
201   void *arg;
202   uptr pc;
203 };
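// Illustrative user-level example (assumed typical usage, not runtime code):
// the Release() performed when the callback is registered and the matching
// Acquire() in the wrapper further down are what make the write to `data`
// visible in the callback without a race report, even if the callback runs
// on a different thread at exit:
//
//   static int data;
//   static void report() { printf("%d\n", data); }
//   int main() {
//     data = 42;       // happens-before the registration below
//     atexit(report);  // interceptor: Release(ctx); wrapper: Acquire(ctx)
//     return 0;
//   }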
204 
205 // InterceptorContext holds all global data required for interceptors.
206 // It's explicitly constructed in InitializeInterceptors with placement new
207 // and is never destroyed. This allows usage of members with non-trivial
208 // constructors and destructors.
209 struct InterceptorContext {
210   // The object is 64-byte aligned, because we want hot data to be located
211   // in a single cache line if possible (it's accessed in every interceptor).
212   alignas(64) LibIgnore libignore;
213   __sanitizer_sigaction sigactions[kSigCount];
214 #if !SANITIZER_APPLE && !SANITIZER_NETBSD
215   unsigned finalize_key;
216 #endif
217 
218   Mutex atexit_mu;
219   Vector<struct AtExitCtx *> AtExitStack;
220 
221   InterceptorContext() : libignore(LINKER_INITIALIZED), atexit_mu(MutexTypeAtExit), AtExitStack() {}
222 };
223 
224 alignas(64) static char interceptor_placeholder[sizeof(InterceptorContext)];
225 InterceptorContext *interceptor_ctx() {
226   return reinterpret_cast<InterceptorContext*>(&interceptor_placeholder[0]);
227 }
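// A minimal sketch of the construction mentioned above (the actual call is in
// InitializeInterceptors(), which is outside this excerpt; treat the exact
// shape as an assumption): the context is built in the static buffer with
// placement new and intentionally never destroyed, which is what makes members
// with non-trivial constructors and destructors safe to use during shutdown.
//
//   new (interceptor_ctx()) InterceptorContext();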
228 
229 LibIgnore *libignore() {
230   return &interceptor_ctx()->libignore;
231 }
232 
233 void InitializeLibIgnore() {
234   const SuppressionContext &supp = *Suppressions();
235   const uptr n = supp.SuppressionCount();
236   for (uptr i = 0; i < n; i++) {
237     const Suppression *s = supp.SuppressionAt(i);
238     if (0 == internal_strcmp(s->type, kSuppressionLib))
239       libignore()->AddIgnoredLibrary(s->templ);
240   }
241   if (flags()->ignore_noninstrumented_modules)
242     libignore()->IgnoreNoninstrumentedModules(true);
243   libignore()->OnLibraryLoaded(0);
244 }
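// Example of the suppression kind consumed above (assuming the standard TSan
// suppression syntax, where kSuppressionLib corresponds to "called_from_lib"):
// a line like the following in the file passed via
// TSAN_OPTIONS=suppressions=<file> makes all accesses performed from within
// libexample.so ignored, as if the library were on the ignore list:
//
//   called_from_lib:libexample.so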
245 
246 // The following two hooks can be used for cooperative scheduling when
247 // locking.
248 #ifdef TSAN_EXTERNAL_HOOKS
249 void OnPotentiallyBlockingRegionBegin();
250 void OnPotentiallyBlockingRegionEnd();
251 #else
252 SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionBegin() {}
253 SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionEnd() {}
254 #endif
255 
256 // FIXME: Use this for `in_symbolizer()` as well. As-is we can't use
257 // `DlSymAllocator`, because it uses only the primary allocator; the
258 // symbolizer requires secondary allocator support for larger blocks.
259 struct DlsymAlloc : public DlSymAllocator<DlsymAlloc> {
260   static bool UseImpl() { return (ctx && !ctx->initialized); }
261 };
262 
263 }  // namespace __tsan
264 
265 static ThreadSignalContext *SigCtx(ThreadState *thr) {
266   // This function may be called reentrantly if it is interrupted by a signal
267   // handler. Use CAS to handle the race.
268   uptr ctx = atomic_load(&thr->signal_ctx, memory_order_relaxed);
269   if (ctx == 0 && !thr->is_dead) {
270     uptr pctx =
271         (uptr)MmapOrDie(sizeof(ThreadSignalContext), "ThreadSignalContext");
272     MemoryResetRange(thr, (uptr)&SigCtx, pctx, sizeof(ThreadSignalContext));
273     if (atomic_compare_exchange_strong(&thr->signal_ctx, &ctx, pctx,
274                                        memory_order_relaxed)) {
275       ctx = pctx;
276     } else {
277       UnmapOrDie((ThreadSignalContext *)pctx, sizeof(ThreadSignalContext));
278     }
279   }
280   return (ThreadSignalContext *)ctx;
281 }
282 
283 ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
284                                      uptr pc)
285     : thr_(thr) {
286   LazyInitialize(thr);
287   if (UNLIKELY(atomic_load(&thr->in_blocking_func, memory_order_relaxed))) {
288     // pthread_join is marked as blocking, but it's also known to call other
289     // intercepted functions (mmap, free). If we don't reset in_blocking_func
290     // we can get deadlocks and memory corruptions if we deliver a synchronous
291     // signal inside of an mmap/free interceptor.
292     // So reset it and restore it back in the destructor.
293     // See https://github.com/google/sanitizers/issues/1540
294     atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
295     in_blocking_func_ = true;
296   }
297   if (!thr_->is_inited) return;
298   if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
299   DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
300   ignoring_ =
301       !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
302                                 libignore()->IsIgnored(pc, &in_ignored_lib_));
303   EnableIgnores();
304 }
305 
306 ScopedInterceptor::~ScopedInterceptor() {
307   if (!thr_->is_inited) return;
308   DisableIgnores();
309   if (UNLIKELY(in_blocking_func_))
310     EnterBlockingFunc(thr_);
311   if (!thr_->ignore_interceptors) {
312     ProcessPendingSignals(thr_);
313     FuncExit(thr_);
314     CheckedMutex::CheckNoLocks();
315   }
316 }
317 
318 NOINLINE
319 void ScopedInterceptor::EnableIgnoresImpl() {
320   ThreadIgnoreBegin(thr_, 0);
321   if (flags()->ignore_noninstrumented_modules)
322     thr_->suppress_reports++;
323   if (in_ignored_lib_) {
324     DCHECK(!thr_->in_ignored_lib);
325     thr_->in_ignored_lib = true;
326   }
327 }
328 
329 NOINLINE
330 void ScopedInterceptor::DisableIgnoresImpl() {
331   ThreadIgnoreEnd(thr_);
332   if (flags()->ignore_noninstrumented_modules)
333     thr_->suppress_reports--;
334   if (in_ignored_lib_) {
335     DCHECK(thr_->in_ignored_lib);
336     thr_->in_ignored_lib = false;
337   }
338 }
339 
340 #define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
341 #if SANITIZER_FREEBSD || SANITIZER_NETBSD
342 #  define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
343 #else
344 #  define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
345 #endif
346 #if SANITIZER_FREEBSD
347 #  define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func) \
348     INTERCEPT_FUNCTION(_pthread_##func)
349 #else
350 #  define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func)
351 #endif
352 #if SANITIZER_NETBSD
353 #  define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
354     INTERCEPT_FUNCTION(__libc_##func)
355 #  define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
356     INTERCEPT_FUNCTION(__libc_thr_##func)
357 #else
358 #  define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
359 #  define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
360 #endif
361 
362 #define READ_STRING_OF_LEN(thr, pc, s, len, n)                 \
363   MemoryAccessRange((thr), (pc), (uptr)(s),                         \
364     common_flags()->strict_string_checks ? (len) + 1 : (n), false)
365 
366 #define READ_STRING(thr, pc, s, n)                             \
367     READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))
368 
369 #define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
370 
371 struct BlockingCall {
372   explicit BlockingCall(ThreadState *thr)
373       : thr(thr) {
374     EnterBlockingFunc(thr);
375     // When we are in a "blocking call", we process signals asynchronously
376     // (right when they arrive). In this context we do not expect to be
377     // executing any user/runtime code. The known interceptor sequence when
378     // this is not true is: pthread_join -> munmap(stack). It's fine
379     // to ignore munmap in this case -- we handle stack shadow separately.
380     thr->ignore_interceptors++;
381   }
382 
383   ~BlockingCall() {
384     thr->ignore_interceptors--;
385     atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
386   }
387 
388   ThreadState *thr;
389 };
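// Note on how the macro and the RAII struct compose: BLOCK_REAL(name)(args)
// expands to (BlockingCall(thr), REAL(name))(args), so the BlockingCall
// temporary lives for exactly the duration of the real call -- signals are
// delivered asynchronously while the call blocks, and the destructor restores
// the previous state afterwards. For example, the sleep interceptor below
// effectively executes:
//
//   unsigned res = (BlockingCall(thr), REAL(sleep))(sec);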
390 
391 TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
392   SCOPED_TSAN_INTERCEPTOR(sleep, sec);
393   unsigned res = BLOCK_REAL(sleep)(sec);
394   AfterSleep(thr, pc);
395   return res;
396 }
397 
398 TSAN_INTERCEPTOR(int, usleep, long_t usec) {
399   SCOPED_TSAN_INTERCEPTOR(usleep, usec);
400   int res = BLOCK_REAL(usleep)(usec);
401   AfterSleep(thr, pc);
402   return res;
403 }
404 
405 TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
406   SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
407   int res = BLOCK_REAL(nanosleep)(req, rem);
408   AfterSleep(thr, pc);
409   return res;
410 }
411 
412 TSAN_INTERCEPTOR(int, pause, int fake) {
413   SCOPED_TSAN_INTERCEPTOR(pause, fake);
414   return BLOCK_REAL(pause)(fake);
415 }
416 
417 // Note: the function is deliberately given this strange name, with the
418 // "installed_at" suffix, because in reports it will appear between the
419 // callback frames and the frame that installed the callback.
420 static void at_exit_callback_installed_at() {
421   AtExitCtx *ctx;
422   {
423     // Ensure thread-safety.
424     Lock l(&interceptor_ctx()->atexit_mu);
425 
426     // Pop AtExitCtx from the top of the stack of callback functions
427     uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
428     ctx = interceptor_ctx()->AtExitStack[element];
429     interceptor_ctx()->AtExitStack.PopBack();
430   }
431 
432   ThreadState *thr = cur_thread();
433   Acquire(thr, ctx->pc, (uptr)ctx);
434   FuncEntry(thr, ctx->pc);
435   ((void(*)())ctx->f)();
436   FuncExit(thr);
437   Free(ctx);
438 }
439 
440 static void cxa_at_exit_callback_installed_at(void *arg) {
441   ThreadState *thr = cur_thread();
442   AtExitCtx *ctx = (AtExitCtx*)arg;
443   Acquire(thr, ctx->pc, (uptr)arg);
444   FuncEntry(thr, ctx->pc);
445   ((void(*)(void *arg))ctx->f)(ctx->arg);
446   FuncExit(thr);
447   Free(ctx);
448 }
449 
450 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
451       void *arg, void *dso);
452 
453 #if !SANITIZER_ANDROID
454 TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
455   if (in_symbolizer())
456     return 0;
457   // We want to setup the atexit callback even if we are in ignored lib
458   // or after fork.
459   SCOPED_INTERCEPTOR_RAW(atexit, f);
460   return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, 0, 0);
461 }
462 #endif
463 
464 TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
465   if (in_symbolizer())
466     return 0;
467   SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
468   return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, arg, dso);
469 }
470 
471 static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
472       void *arg, void *dso) {
473   auto *ctx = New<AtExitCtx>();
474   ctx->f = f;
475   ctx->arg = arg;
476   ctx->pc = pc;
477   Release(thr, pc, (uptr)ctx);
478   // Memory allocation in __cxa_atexit will race with free during exit,
479   // because we do not see synchronization around atexit callback list.
480   ThreadIgnoreBegin(thr, pc);
481   int res;
482   if (!dso) {
483     // NetBSD does not preserve the 2nd argument if dso is equal to 0
484     // Store ctx in a local stack-like structure
485 
486     // Ensure thread-safety.
487     Lock l(&interceptor_ctx()->atexit_mu);
488     // __cxa_atexit calls calloc. If we don't ignore interceptors, we will fail
489     // due to atexit_mu held on exit from the calloc interceptor.
490     ScopedIgnoreInterceptors ignore;
491 
492     res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_callback_installed_at,
493                              0, 0);
494     // Push AtExitCtx on the top of the stack of callback functions
495     if (!res) {
496       interceptor_ctx()->AtExitStack.PushBack(ctx);
497     }
498   } else {
499     res = REAL(__cxa_atexit)(cxa_at_exit_callback_installed_at, ctx, dso);
500   }
501   ThreadIgnoreEnd(thr);
502   return res;
503 }
504 
505 #if !SANITIZER_APPLE && !SANITIZER_NETBSD
506 static void on_exit_callback_installed_at(int status, void *arg) {
507   ThreadState *thr = cur_thread();
508   AtExitCtx *ctx = (AtExitCtx*)arg;
509   Acquire(thr, ctx->pc, (uptr)arg);
510   FuncEntry(thr, ctx->pc);
511   ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
512   FuncExit(thr);
513   Free(ctx);
514 }
515 
516 TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
517   if (in_symbolizer())
518     return 0;
519   SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
520   auto *ctx = New<AtExitCtx>();
521   ctx->f = (void(*)())f;
522   ctx->arg = arg;
523   ctx->pc = GET_CALLER_PC();
524   Release(thr, pc, (uptr)ctx);
525   // Memory allocation in __cxa_atexit will race with free during exit,
526   // because we do not see synchronization around atexit callback list.
527   ThreadIgnoreBegin(thr, pc);
528   int res = REAL(on_exit)(on_exit_callback_installed_at, ctx);
529   ThreadIgnoreEnd(thr);
530   return res;
531 }
532 #define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
533 #else
534 #define TSAN_MAYBE_INTERCEPT_ON_EXIT
535 #endif
536 
537 // Cleanup old bufs.
538 static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
539   for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
540     JmpBuf *buf = &thr->jmp_bufs[i];
541     if (buf->sp <= sp) {
542       uptr sz = thr->jmp_bufs.Size();
543       internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf));
544       thr->jmp_bufs.PopBack();
545       i--;
546     }
547   }
548 }
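// The loop above uses the usual "unordered erase" idiom: overwrite the dead
// element with the last one, pop the tail, and re-examine the current slot.
// This is O(1) per removal; the order of jmp_bufs does not matter here. A
// generic sketch of the same idiom:
//
//   for (uptr i = 0; i < v.Size(); i++) {
//     if (ShouldRemove(v[i])) {  // ShouldRemove is a placeholder predicate
//       v[i] = v[v.Size() - 1];
//       v.PopBack();
//       i--;                     // re-check the element moved into slot i
//     }
//   }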
549 
550 static void SetJmp(ThreadState *thr, uptr sp) {
551   if (!thr->is_inited)  // called from libc guts during bootstrap
552     return;
553   // Cleanup old bufs.
554   JmpBufGarbageCollect(thr, sp);
555   // Remember the buf.
556   JmpBuf *buf = thr->jmp_bufs.PushBack();
557   buf->sp = sp;
558   buf->shadow_stack_pos = thr->shadow_stack_pos;
559   ThreadSignalContext *sctx = SigCtx(thr);
560   buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
561   buf->in_blocking_func = atomic_load(&thr->in_blocking_func, memory_order_relaxed);
562   buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
563       memory_order_relaxed);
564 }
565 
566 static void LongJmp(ThreadState *thr, uptr *env) {
567   uptr sp = ExtractLongJmpSp(env);
568   // Find the saved buf with matching sp.
569   for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
570     JmpBuf *buf = &thr->jmp_bufs[i];
571     if (buf->sp == sp) {
572       CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
573       // Unwind the stack.
574       while (thr->shadow_stack_pos > buf->shadow_stack_pos)
575         FuncExit(thr);
576       ThreadSignalContext *sctx = SigCtx(thr);
577       if (sctx)
578         sctx->int_signal_send = buf->int_signal_send;
579       atomic_store(&thr->in_blocking_func, buf->in_blocking_func,
580           memory_order_relaxed);
581       atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
582           memory_order_relaxed);
583       JmpBufGarbageCollect(thr, buf->sp - 1);  // do not collect buf->sp
584       return;
585     }
586   }
587   Printf("ThreadSanitizer: can't find longjmp buf\n");
588   CHECK(0);
589 }
590 
591 // FIXME: put everything below into a common extern "C" block?
592 extern "C" void __tsan_setjmp(uptr sp) { SetJmp(cur_thread_init(), sp); }
593 
594 #if SANITIZER_APPLE
595 TSAN_INTERCEPTOR(int, setjmp, void *env);
596 TSAN_INTERCEPTOR(int, _setjmp, void *env);
597 TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
598 #else  // SANITIZER_APPLE
599 
600 #if SANITIZER_NETBSD
601 #define setjmp_symname __setjmp14
602 #define sigsetjmp_symname __sigsetjmp14
603 #else
604 #define setjmp_symname setjmp
605 #define sigsetjmp_symname sigsetjmp
606 #endif
607 
608 DEFINE_REAL(int, setjmp_symname, void *env)
609 DEFINE_REAL(int, _setjmp, void *env)
610 DEFINE_REAL(int, sigsetjmp_symname, void *env)
611 #if !SANITIZER_NETBSD
612 DEFINE_REAL(int, __sigsetjmp, void *env)
613 #endif
614 
615 // The real interceptor for setjmp is special, and implemented in pure asm. We
616 // just need to initialize the REAL functions so that they can be used in asm.
617 static void InitializeSetjmpInterceptors() {
618   // We cannot use TSAN_INTERCEPT to get the setjmp address, because it takes
619   // &setjmp, and setjmp is not present in some versions of libc.
620   using __interception::InterceptFunction;
621   InterceptFunction(SANITIZER_STRINGIFY(setjmp_symname), (uptr*)&REAL(setjmp_symname), 0, 0);
622   InterceptFunction("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
623   InterceptFunction(SANITIZER_STRINGIFY(sigsetjmp_symname), (uptr*)&REAL(sigsetjmp_symname), 0,
624                     0);
625 #if !SANITIZER_NETBSD
626   InterceptFunction("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
627 #endif
628 }
629 #endif  // SANITIZER_APPLE
630 
631 #if SANITIZER_NETBSD
632 #define longjmp_symname __longjmp14
633 #define siglongjmp_symname __siglongjmp14
634 #else
635 #define longjmp_symname longjmp
636 #define siglongjmp_symname siglongjmp
637 #endif
638 
639 TSAN_INTERCEPTOR(void, longjmp_symname, uptr *env, int val) {
640   // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
641   // bad things will happen. We will jump over ScopedInterceptor dtor and can
642   // leave thr->in_ignored_lib set.
643   {
644     SCOPED_INTERCEPTOR_RAW(longjmp_symname, env, val);
645   }
646   LongJmp(cur_thread(), env);
647   REAL(longjmp_symname)(env, val);
648 }
649 
650 TSAN_INTERCEPTOR(void, siglongjmp_symname, uptr *env, int val) {
651   {
652     SCOPED_INTERCEPTOR_RAW(siglongjmp_symname, env, val);
653   }
654   LongJmp(cur_thread(), env);
655   REAL(siglongjmp_symname)(env, val);
656 }
657 
658 #if SANITIZER_NETBSD
659 TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) {
660   {
661     SCOPED_INTERCEPTOR_RAW(_longjmp, env, val);
662   }
663   LongJmp(cur_thread(), env);
664   REAL(_longjmp)(env, val);
665 }
666 #endif
667 
668 #if !SANITIZER_APPLE
669 TSAN_INTERCEPTOR(void*, malloc, uptr size) {
670   if (in_symbolizer())
671     return InternalAlloc(size);
672   if (DlsymAlloc::Use())
673     return DlsymAlloc::Allocate(size);
674   void *p = 0;
675   {
676     SCOPED_INTERCEPTOR_RAW(malloc, size);
677     p = user_alloc(thr, pc, size);
678   }
679   invoke_malloc_hook(p, size);
680   return p;
681 }
682 
683 // In glibc<2.25, dynamic TLS blocks are allocated by __libc_memalign. Intercept
684 // __libc_memalign so that (1) we can detect races and (2) free will not be
685 // called on blocks that libc allocated internally.
686 TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
687   SCOPED_INTERCEPTOR_RAW(__libc_memalign, align, sz);
688   return user_memalign(thr, pc, align, sz);
689 }
690 
691 TSAN_INTERCEPTOR(void *, calloc, uptr n, uptr size) {
692   if (in_symbolizer())
693     return InternalCalloc(n, size);
694   if (DlsymAlloc::Use())
695     return DlsymAlloc::Callocate(n, size);
696   void *p = 0;
697   {
698     SCOPED_INTERCEPTOR_RAW(calloc, n, size);
699     p = user_calloc(thr, pc, size, n);
700   }
701   invoke_malloc_hook(p, n * size);
702   return p;
703 }
704 
705 TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
706   if (in_symbolizer())
707     return InternalRealloc(p, size);
708   if (DlsymAlloc::Use() || DlsymAlloc::PointerIsMine(p))
709     return DlsymAlloc::Realloc(p, size);
710   if (p)
711     invoke_free_hook(p);
712   {
713     SCOPED_INTERCEPTOR_RAW(realloc, p, size);
714     p = user_realloc(thr, pc, p, size);
715   }
716   invoke_malloc_hook(p, size);
717   return p;
718 }
719 
720 TSAN_INTERCEPTOR(void *, reallocarray, void *p, uptr n, uptr size) {
721   if (in_symbolizer())
722     return InternalReallocArray(p, n, size);
723   if (p)
724     invoke_free_hook(p);
725   {
726     SCOPED_INTERCEPTOR_RAW(reallocarray, p, n, size);
727     p = user_reallocarray(thr, pc, p, size, n);
728   }
729   invoke_malloc_hook(p, size);
730   return p;
731 }
732 
733 TSAN_INTERCEPTOR(void, free, void *p) {
734   if (UNLIKELY(!p))
735     return;
736   if (in_symbolizer())
737     return InternalFree(p);
738   if (DlsymAlloc::PointerIsMine(p))
739     return DlsymAlloc::Free(p);
740   invoke_free_hook(p);
741   SCOPED_INTERCEPTOR_RAW(free, p);
742   user_free(thr, pc, p);
743 }
744 
745 TSAN_INTERCEPTOR(void, cfree, void *p) {
746   if (UNLIKELY(!p))
747     return;
748   if (in_symbolizer())
749     return InternalFree(p);
750   if (DlsymAlloc::PointerIsMine(p))
751     return DlsymAlloc::Free(p);
752   invoke_free_hook(p);
753   SCOPED_INTERCEPTOR_RAW(cfree, p);
754   user_free(thr, pc, p);
755 }
756 
757 TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
758   SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
759   return user_alloc_usable_size(p);
760 }
761 #endif
762 
763 TSAN_INTERCEPTOR(char *, strcpy, char *dst, const char *src) {
764   SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src);
765   uptr srclen = internal_strlen(src);
766   MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true);
767   MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false);
768   return REAL(strcpy)(dst, src);
769 }
770 
771 TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, usize n) {
772   SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
773   uptr srclen = internal_strnlen(src, n);
774   MemoryAccessRange(thr, pc, (uptr)dst, n, true);
775   MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false);
776   return REAL(strncpy)(dst, src, n);
777 }
778 
779 TSAN_INTERCEPTOR(char*, strdup, const char *str) {
780   SCOPED_TSAN_INTERCEPTOR(strdup, str);
781   // strdup will call malloc, so no instrumentation is required here.
782   return REAL(strdup)(str);
783 }
784 
785 // Zero out addr if it points into shadow memory and was provided as a hint
786 // only, i.e., MAP_FIXED is not set.
787 static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
788   if (*addr) {
789     if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
790       if (flags & MAP_FIXED) {
791         errno = errno_EINVAL;
792         return false;
793       } else {
794         *addr = 0;
795       }
796     }
797   }
798   return true;
799 }
800 
801 template <class Mmap>
802 static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
803                               void *addr, SIZE_T sz, int prot, int flags,
804                               int fd, OFF64_T off) {
805   if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED;
806   void *res = real_mmap(addr, sz, prot, flags, fd, off);
807   if (res != MAP_FAILED) {
808     if (!IsAppMem((uptr)res) || !IsAppMem((uptr)res + sz - 1)) {
809       Report("ThreadSanitizer: mmap at bad address: addr=%p size=%p res=%p\n",
810              addr, (void*)sz, res);
811       Die();
812     }
813     if (fd > 0) FdAccess(thr, pc, fd);
814     MemoryRangeImitateWriteOrResetRange(thr, pc, (uptr)res, sz);
815   }
816   return res;
817 }
818 
819 template <class Munmap>
820 static int munmap_interceptor(ThreadState *thr, uptr pc, Munmap real_munmap,
821                                 void *addr, SIZE_T sz) {
822   UnmapShadow(thr, (uptr)addr, sz);
823   int res = real_munmap(addr, sz);
824   return res;
825 }
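// A sketch of how these templates are typically instantiated (the concrete
// mmap/munmap interceptors live further down in this file and in
// platform-specific code, so details may differ): the platform interceptor
// passes its REAL function through, keeping the shared bookkeeping in one
// place.
//
//   TSAN_INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags,
//                    int fd, OFF_T off) {
//     SCOPED_TSAN_INTERCEPTOR(mmap, addr, sz, prot, flags, fd, off);
//     return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd,
//                             off);
//   }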
826 
827 #if SANITIZER_LINUX
828 TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
829   SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
830   return user_memalign(thr, pc, align, sz);
831 }
832 #define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
833 #else
834 #define TSAN_MAYBE_INTERCEPT_MEMALIGN
835 #endif
836 
837 #if !SANITIZER_APPLE
838 TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
839   if (in_symbolizer())
840     return InternalAlloc(sz, nullptr, align);
841   SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
842   return user_aligned_alloc(thr, pc, align, sz);
843 }
844 
845 TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
846   if (in_symbolizer())
847     return InternalAlloc(sz, nullptr, GetPageSizeCached());
848   SCOPED_INTERCEPTOR_RAW(valloc, sz);
849   return user_valloc(thr, pc, sz);
850 }
851 #endif
852 
853 #if SANITIZER_LINUX
854 TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
855   if (in_symbolizer()) {
856     uptr PageSize = GetPageSizeCached();
857     sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
858     return InternalAlloc(sz, nullptr, PageSize);
859   }
860   SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
861   return user_pvalloc(thr, pc, sz);
862 }
863 #define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
864 #else
865 #define TSAN_MAYBE_INTERCEPT_PVALLOC
866 #endif
867 
868 #if !SANITIZER_APPLE
869 TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
870   if (in_symbolizer()) {
871     void *p = InternalAlloc(sz, nullptr, align);
872     if (!p)
873       return errno_ENOMEM;
874     *memptr = p;
875     return 0;
876   }
877   SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
878   return user_posix_memalign(thr, pc, memptr, align, sz);
879 }
880 #endif
881 
882 // Both __cxa_guard_acquire and pthread_once 0-initialize
883 // the object initially. pthread_once does not have any
884 // other ABI requirements. __cxa_guard_acquire assumes
885 // that any non-0 value in the first byte means that
886 // initialization is completed. Contents of the remaining
887 // bytes are up to us.
888 constexpr u32 kGuardInit = 0;
889 constexpr u32 kGuardDone = 1;
890 constexpr u32 kGuardRunning = 1 << 16;
891 constexpr u32 kGuardWaiter = 1 << 17;
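// Guard word life cycle implemented by guard_acquire/guard_release below:
//   kGuardInit --(first caller CASes)--> kGuardRunning --(release)--> kGuardDone
// A caller that observes kGuardRunning sets kGuardWaiter (if not already set)
// and futex-waits; guard_release futex-wakes the waiters when the waiter bit
// was set. __cxa_guard_abort releases back to kGuardInit so that another
// thread can retry the initializer.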
892 
893 static int guard_acquire(ThreadState *thr, uptr pc, atomic_uint32_t *g,
894                          bool blocking_hooks = true) {
895   if (blocking_hooks)
896     OnPotentiallyBlockingRegionBegin();
897   auto on_exit = at_scope_exit([blocking_hooks] {
898     if (blocking_hooks)
899       OnPotentiallyBlockingRegionEnd();
900   });
901 
902   for (;;) {
903     u32 cmp = atomic_load(g, memory_order_acquire);
904     if (cmp == kGuardInit) {
905       if (atomic_compare_exchange_strong(g, &cmp, kGuardRunning,
906                                          memory_order_relaxed))
907         return 1;
908     } else if (cmp == kGuardDone) {
909       if (!thr->in_ignored_lib)
910         Acquire(thr, pc, (uptr)g);
911       return 0;
912     } else {
913       if ((cmp & kGuardWaiter) ||
914           atomic_compare_exchange_strong(g, &cmp, cmp | kGuardWaiter,
915                                          memory_order_relaxed))
916         FutexWait(g, cmp | kGuardWaiter);
917     }
918   }
919 }
920 
921 static void guard_release(ThreadState *thr, uptr pc, atomic_uint32_t *g,
922                           u32 v) {
923   if (!thr->in_ignored_lib)
924     Release(thr, pc, (uptr)g);
925   u32 old = atomic_exchange(g, v, memory_order_release);
926   if (old & kGuardWaiter)
927     FutexWake(g, 1 << 30);
928 }
929 
930 // __cxa_guard_acquire and friends need to be intercepted in a special way --
931 // regular interceptors would break statically-linked libstdc++. Linux
932 // interceptors are deliberately defined as weak functions (so that they don't
933 // cause link errors when the user defines them as well), so they silently
934 // auto-disable themselves when such a symbol is already present in the binary.
935 // If we link libstdc++ statically, it brings its own __cxa_guard_acquire, which
936 // would silently replace our interceptor. That's why on Linux we simply export
937 // these interceptors with INTERFACE_ATTRIBUTE.
938 // On OS X, we don't support static linking, so we just use a regular
939 // interceptor.
940 #if SANITIZER_APPLE
941 #define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
942 #else
943 #define STDCXX_INTERCEPTOR(rettype, name, ...) \
944   extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
945 #endif
946 
947 // Used in thread-safe function static initialization.
948 STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
949   SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
950   return guard_acquire(thr, pc, g);
951 }
952 
953 STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
954   SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
955   guard_release(thr, pc, g, kGuardDone);
956 }
957 
958 STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
959   SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
960   guard_release(thr, pc, g, kGuardInit);
961 }
962 
963 namespace __tsan {
964 void DestroyThreadState() {
965   ThreadState *thr = cur_thread();
966   Processor *proc = thr->proc();
967   ThreadFinish(thr);
968   ProcUnwire(proc, thr);
969   ProcDestroy(proc);
970   DTLS_Destroy();
971   cur_thread_finalize();
972 }
973 
974 void PlatformCleanUpThreadState(ThreadState *thr) {
975   ThreadSignalContext *sctx = (ThreadSignalContext *)atomic_load(
976       &thr->signal_ctx, memory_order_relaxed);
977   if (sctx) {
978     atomic_store(&thr->signal_ctx, 0, memory_order_relaxed);
979     UnmapOrDie(sctx, sizeof(*sctx));
980   }
981 }
982 }  // namespace __tsan
983 
984 #if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
985 static void thread_finalize(void *v) {
986   uptr iter = (uptr)v;
987   if (iter > 1) {
988     if (pthread_setspecific(interceptor_ctx()->finalize_key,
989         (void*)(iter - 1))) {
990       Printf("ThreadSanitizer: failed to set thread key\n");
991       Die();
992     }
993     return;
994   }
995   DestroyThreadState();
996 }
997 #endif
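// The counter trick above: pthread runs TLS destructors in up to
// PTHREAD_DESTRUCTOR_ITERATIONS rounds, and re-registering a non-null value
// from a destructor schedules the key for another round. Counting down from
// GetPthreadDestructorIterations() therefore defers DestroyThreadState() to
// the last round, after other destructors (which may still touch the thread
// state) have run. A user-level sketch of the same pattern (hypothetical
// names, illustration only):
//
//   static pthread_key_t key;  // created with a thread_finalize-style destructor
//   static void dtor(void *v) {
//     uptr iter = (uptr)v;
//     if (iter > 1) {
//       pthread_setspecific(key, (void *)(iter - 1));  // run again next round
//       return;
//     }
//     ReallyFinalize();  // hypothetical final cleanup
//   }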
998 
999 
1000 struct ThreadParam {
1001   void* (*callback)(void *arg);
1002   void *param;
1003   Tid tid;
1004   Semaphore created;
1005   Semaphore started;
1006 };
1007 
1008 extern "C" void *__tsan_thread_start_func(void *arg) {
1009   ThreadParam *p = (ThreadParam*)arg;
1010   void* (*callback)(void *arg) = p->callback;
1011   void *param = p->param;
1012   {
1013     ThreadState *thr = cur_thread_init();
1014     // Thread-local state is not initialized yet.
1015     ScopedIgnoreInterceptors ignore;
1016 #if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
1017     ThreadIgnoreBegin(thr, 0);
1018     if (pthread_setspecific(interceptor_ctx()->finalize_key,
1019                             (void *)GetPthreadDestructorIterations())) {
1020       Printf("ThreadSanitizer: failed to set thread key\n");
1021       Die();
1022     }
1023     ThreadIgnoreEnd(thr);
1024 #endif
1025     p->created.Wait();
1026     Processor *proc = ProcCreate();
1027     ProcWire(proc, thr);
1028     ThreadStart(thr, p->tid, GetTid(), ThreadType::Regular);
1029     p->started.Post();
1030   }
1031   void *res = callback(param);
1032   // Prevent the callback from being tail called,
1033   // as that mixes up stack traces.
1034   volatile int foo = 42;
1035   foo++;
1036   return res;
1037 }
1038 
1039 TSAN_INTERCEPTOR(int, pthread_create,
1040     void *th, void *attr, void *(*callback)(void*), void * param) {
1041   SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
1042 
1043   MaybeSpawnBackgroundThread();
1044 
1045   if (ctx->after_multithreaded_fork) {
1046     if (flags()->die_after_fork) {
1047       Report("ThreadSanitizer: starting new threads after multi-threaded "
1048           "fork is not supported. Dying (set die_after_fork=0 to override)\n");
1049       Die();
1050     } else {
1051       VPrintf(1,
1052               "ThreadSanitizer: starting new threads after multi-threaded "
1053               "fork is not supported (pid %lu). Continuing because of "
1054               "die_after_fork=0, but you are on your own\n",
1055               internal_getpid());
1056     }
1057   }
1058   __sanitizer_pthread_attr_t myattr;
1059   if (attr == 0) {
1060     pthread_attr_init(&myattr);
1061     attr = &myattr;
1062   }
1063   int detached = 0;
1064   REAL(pthread_attr_getdetachstate)(attr, &detached);
1065   AdjustStackSize(attr);
1066 
1067   ThreadParam p;
1068   p.callback = callback;
1069   p.param = param;
1070   p.tid = kMainTid;
1071   int res = -1;
1072   {
1073     // Otherwise we see false positives in pthread stack manipulation.
1074     ScopedIgnoreInterceptors ignore;
1075     ThreadIgnoreBegin(thr, pc);
1076     res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
1077     ThreadIgnoreEnd(thr);
1078   }
1079   if (res == 0) {
1080     p.tid = ThreadCreate(thr, pc, *(uptr *)th, IsStateDetached(detached));
1081     CHECK_NE(p.tid, kMainTid);
1082     // Synchronization on p.tid serves two purposes:
1083     // 1. ThreadCreate must finish before the new thread starts.
1084     //    Otherwise the new thread can call pthread_detach, but the pthread_t
1085     //    identifier is not yet registered in ThreadRegistry by ThreadCreate.
1086     // 2. ThreadStart must finish before this thread continues.
1087     //    Otherwise, this thread can call pthread_detach and reset thr->sync
1088     //    before the new thread got a chance to acquire from it in ThreadStart.
1089     p.created.Post();
1090     p.started.Wait();
1091   }
1092   if (attr == &myattr)
1093     pthread_attr_destroy(&myattr);
1094   return res;
1095 }
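// The handshake above in miniature (illustration only): synchronization on
// p.tid guarantees that the parent has registered the thread before the child
// starts running user code, and that the child has finished ThreadStart before
// the parent returns, which closes both races described in the comment.
//
//   parent (pthread_create interceptor)    child (__tsan_thread_start_func)
//   -----------------------------------    --------------------------------
//   res = REAL(pthread_create)(...)        p->created.Wait();
//   p.tid = ThreadCreate(...);
//   p.created.Post();          ----------> ThreadStart(thr, p->tid, ...);
//   p.started.Wait();          <---------- p->started.Post();
//   return res;                            res = callback(param);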
1096 
1097 TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
1098   SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
1099   Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1100   ThreadIgnoreBegin(thr, pc);
1101   int res = BLOCK_REAL(pthread_join)(th, ret);
1102   ThreadIgnoreEnd(thr);
1103   if (res == 0) {
1104     ThreadJoin(thr, pc, tid);
1105   }
1106   return res;
1107 }
1108 
1109 // DEFINE_INTERNAL_PTHREAD_FUNCTIONS
1110 namespace __sanitizer {
1111 int internal_pthread_create(void *th, void *attr, void *(*callback)(void *),
1112                             void *param) {
1113   ScopedIgnoreInterceptors ignore;
1114   return REAL(pthread_create)(th, attr, callback, param);
1115 }
1116 int internal_pthread_join(void *th, void **ret) {
1117   ScopedIgnoreInterceptors ignore;
1118   return REAL(pthread_join)(th, ret);
1119 }
1120 }  // namespace __sanitizer
1121 
1122 TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
1123   SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
1124   Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1125   int res = REAL(pthread_detach)(th);
1126   if (res == 0) {
1127     ThreadDetach(thr, pc, tid);
1128   }
1129   return res;
1130 }
1131 
1132 TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
1133   {
1134     SCOPED_INTERCEPTOR_RAW(pthread_exit, retval);
1135 #if !SANITIZER_APPLE && !SANITIZER_ANDROID
1136     CHECK_EQ(thr, &cur_thread_placeholder);
1137 #endif
1138   }
1139   REAL(pthread_exit)(retval);
1140 }
1141 
1142 #if SANITIZER_LINUX
1143 TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
1144   SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
1145   Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1146   ThreadIgnoreBegin(thr, pc);
1147   int res = REAL(pthread_tryjoin_np)(th, ret);
1148   ThreadIgnoreEnd(thr);
1149   if (res == 0)
1150     ThreadJoin(thr, pc, tid);
1151   else
1152     ThreadNotJoined(thr, pc, tid, (uptr)th);
1153   return res;
1154 }
1155 
1156 TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
1157                  const struct timespec *abstime) {
1158   SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
1159   Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
1160   ThreadIgnoreBegin(thr, pc);
1161   int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
1162   ThreadIgnoreEnd(thr);
1163   if (res == 0)
1164     ThreadJoin(thr, pc, tid);
1165   else
1166     ThreadNotJoined(thr, pc, tid, (uptr)th);
1167   return res;
1168 }
1169 #endif
1170 
1171 // Problem:
1172 // The NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2),
1173 // and pthread_cond_t has a different size in the different versions.
1174 // If we call the new REAL functions for an old pthread_cond_t, they will
1175 // corrupt memory past the end of the object (the old cond is smaller).
1176 // If we call the old REAL functions for a new pthread_cond_t, we will lose
1177 // some functionality (e.g. the old functions do not support waiting against
1178 // CLOCK_REALTIME).
1179 // Proper handling would require having 2 versions of the interceptors as well,
1180 // but this is messy; in particular, it requires linker scripts when the
1181 // sanitizer runtime is linked into a shared library.
1182 // Instead we assume we don't have dynamic libraries built against the old
1183 // pthread (2.2.5 dates back to 2002) and provide the legacy_pthread_cond flag
1184 // that allows working with old libraries (but this mode does not support
1185 // some features, e.g. pthread_condattr_getpshared).
1186 static void *init_cond(void *c, bool force = false) {
1187   // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
1188   // So we allocate additional memory on the side, large enough to hold
1189   // any pthread_cond_t object. We always call the new REAL functions, but
1190   // pass the aux object to them.
1191   // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
1192   // the first word of pthread_cond_t to zero.
1193   // It's all relevant only for Linux.
1194   if (!common_flags()->legacy_pthread_cond)
1195     return c;
1196   atomic_uintptr_t *p = (atomic_uintptr_t*)c;
1197   uptr cond = atomic_load(p, memory_order_acquire);
1198   if (!force && cond != 0)
1199     return (void*)cond;
1200   void *newcond = WRAP(malloc)(pthread_cond_t_sz);
1201   internal_memset(newcond, 0, pthread_cond_t_sz);
1202   if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
1203       memory_order_acq_rel))
1204     return newcond;
1205   WRAP(free)(newcond);
1206   return (void*)cond;
1207 }
1208 
1209 namespace {
1210 
1211 template <class Fn>
1212 struct CondMutexUnlockCtx {
1213   ScopedInterceptor *si;
1214   ThreadState *thr;
1215   uptr pc;
1216   void *m;
1217   void *c;
1218   const Fn &fn;
1219 
1220   int Cancel() const { return fn(); }
1221   void Unlock() const;
1222 };
1223 
1224 template <class Fn>
1225 void CondMutexUnlockCtx<Fn>::Unlock() const {
1226   // The pthread_cond_wait interceptor has enabled async signal delivery
1227   // (see BlockingCall below). Disable async signals since we are running
1228   // tsan code. Also, the ScopedInterceptor and BlockingCall destructors won't
1229   // run since the thread is cancelled, so we have to execute them manually
1230   // (the thread can still run some user code due to pthread_cleanup_push).
1231   CHECK_EQ(atomic_load(&thr->in_blocking_func, memory_order_relaxed), 1);
1232   atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
1233   MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
1234   // Undo BlockingCall ctor effects.
1235   thr->ignore_interceptors--;
1236   si->~ScopedInterceptor();
1237 }
1238 }  // namespace
1239 
1240 INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
1241   void *cond = init_cond(c, true);
1242   SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
1243   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1244   return REAL(pthread_cond_init)(cond, a);
1245 }
1246 
1247 template <class Fn>
1248 int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si, const Fn &fn,
1249               void *c, void *m) {
1250   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1251   MutexUnlock(thr, pc, (uptr)m);
1252   int res = 0;
1253   // This ensures that we handle mutex lock even in case of pthread_cancel.
1254   // See test/tsan/cond_cancel.cpp.
1255   {
1256     // Enable signal delivery while the thread is blocked.
1257     BlockingCall bc(thr);
1258     CondMutexUnlockCtx<Fn> arg = {si, thr, pc, m, c, fn};
1259     res = call_pthread_cancel_with_cleanup(
1260         [](void *arg) -> int {
1261           return ((const CondMutexUnlockCtx<Fn> *)arg)->Cancel();
1262         },
1263         [](void *arg) { ((const CondMutexUnlockCtx<Fn> *)arg)->Unlock(); },
1264         &arg);
1265   }
1266   if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
1267   MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
1268   return res;
1269 }
1270 
1271 INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
1272   void *cond = init_cond(c);
1273   SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
1274   return cond_wait(
1275       thr, pc, &si, [=]() { return REAL(pthread_cond_wait)(cond, m); }, cond,
1276       m);
1277 }
1278 
1279 INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
1280   void *cond = init_cond(c);
1281   SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
1282   return cond_wait(
1283       thr, pc, &si,
1284       [=]() { return REAL(pthread_cond_timedwait)(cond, m, abstime); }, cond,
1285       m);
1286 }
1287 
1288 #if SANITIZER_LINUX
1289 INTERCEPTOR(int, pthread_cond_clockwait, void *c, void *m,
1290             __sanitizer_clockid_t clock, void *abstime) {
1291   void *cond = init_cond(c);
1292   SCOPED_TSAN_INTERCEPTOR(pthread_cond_clockwait, cond, m, clock, abstime);
1293   return cond_wait(
1294       thr, pc, &si,
1295       [=]() { return REAL(pthread_cond_clockwait)(cond, m, clock, abstime); },
1296       cond, m);
1297 }
1298 #define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT TSAN_INTERCEPT(pthread_cond_clockwait)
1299 #else
1300 #define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT
1301 #endif
1302 
1303 #if SANITIZER_APPLE
1304 INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
1305             void *reltime) {
1306   void *cond = init_cond(c);
1307   SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
1308   return cond_wait(
1309       thr, pc, &si,
1310       [=]() {
1311         return REAL(pthread_cond_timedwait_relative_np)(cond, m, reltime);
1312       },
1313       cond, m);
1314 }
1315 #endif
1316 
1317 INTERCEPTOR(int, pthread_cond_signal, void *c) {
1318   void *cond = init_cond(c);
1319   SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
1320   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1321   return REAL(pthread_cond_signal)(cond);
1322 }
1323 
1324 INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
1325   void *cond = init_cond(c);
1326   SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
1327   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
1328   return REAL(pthread_cond_broadcast)(cond);
1329 }
1330 
1331 INTERCEPTOR(int, pthread_cond_destroy, void *c) {
1332   void *cond = init_cond(c);
1333   SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
1334   MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
1335   int res = REAL(pthread_cond_destroy)(cond);
1336   if (common_flags()->legacy_pthread_cond) {
1337     // Free our aux cond and zero the pointer to not leave dangling pointers.
1338     WRAP(free)(cond);
1339     atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
1340   }
1341   return res;
1342 }
1343 
1344 TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
1345   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
1346   int res = REAL(pthread_mutex_init)(m, a);
1347   if (res == 0) {
1348     u32 flagz = 0;
1349     if (a) {
1350       int type = 0;
1351       if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
1352         if (type == PTHREAD_MUTEX_RECURSIVE ||
1353             type == PTHREAD_MUTEX_RECURSIVE_NP)
1354           flagz |= MutexFlagWriteReentrant;
1355     }
1356     MutexCreate(thr, pc, (uptr)m, flagz);
1357   }
1358   return res;
1359 }
1360 
1361 TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
1362   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
1363   int res = REAL(pthread_mutex_destroy)(m);
1364   if (res == 0 || res == errno_EBUSY) {
1365     MutexDestroy(thr, pc, (uptr)m);
1366   }
1367   return res;
1368 }
1369 
1370 TSAN_INTERCEPTOR(int, pthread_mutex_lock, void *m) {
1371   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_lock, m);
1372   MutexPreLock(thr, pc, (uptr)m);
1373   int res = BLOCK_REAL(pthread_mutex_lock)(m);
1374   if (res == errno_EOWNERDEAD)
1375     MutexRepair(thr, pc, (uptr)m);
1376   if (res == 0 || res == errno_EOWNERDEAD)
1377     MutexPostLock(thr, pc, (uptr)m);
1378   if (res == errno_EINVAL)
1379     MutexInvalidAccess(thr, pc, (uptr)m);
1380   return res;
1381 }
1382 
1383 TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
1384   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
1385   int res = REAL(pthread_mutex_trylock)(m);
1386   if (res == errno_EOWNERDEAD)
1387     MutexRepair(thr, pc, (uptr)m);
1388   if (res == 0 || res == errno_EOWNERDEAD)
1389     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1390   return res;
1391 }
1392 
1393 #if !SANITIZER_APPLE
1394 TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
1395   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
1396   int res = REAL(pthread_mutex_timedlock)(m, abstime);
1397   if (res == 0) {
1398     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1399   }
1400   return res;
1401 }
1402 #endif
1403 
1404 TSAN_INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
1405   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_unlock, m);
1406   MutexUnlock(thr, pc, (uptr)m);
1407   int res = REAL(pthread_mutex_unlock)(m);
1408   if (res == errno_EINVAL)
1409     MutexInvalidAccess(thr, pc, (uptr)m);
1410   return res;
1411 }
1412 
1413 #if SANITIZER_LINUX
1414 TSAN_INTERCEPTOR(int, pthread_mutex_clocklock, void *m,
1415                  __sanitizer_clockid_t clock, void *abstime) {
1416   SCOPED_TSAN_INTERCEPTOR(pthread_mutex_clocklock, m, clock, abstime);
1417   MutexPreLock(thr, pc, (uptr)m);
1418   int res = BLOCK_REAL(pthread_mutex_clocklock)(m, clock, abstime);
1419   if (res == errno_EOWNERDEAD)
1420     MutexRepair(thr, pc, (uptr)m);
1421   if (res == 0 || res == errno_EOWNERDEAD)
1422     MutexPostLock(thr, pc, (uptr)m);
1423   if (res == errno_EINVAL)
1424     MutexInvalidAccess(thr, pc, (uptr)m);
1425   return res;
1426 }
1427 #endif
1428 
1429 #if SANITIZER_GLIBC
1430 #  if !__GLIBC_PREREQ(2, 34)
1431 // glibc 2.34 applies a non-default version for the two functions. They are no
1432 // longer expected to be intercepted by programs.
1433 TSAN_INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
1434   SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_lock, m);
1435   MutexPreLock(thr, pc, (uptr)m);
1436   int res = BLOCK_REAL(__pthread_mutex_lock)(m);
1437   if (res == errno_EOWNERDEAD)
1438     MutexRepair(thr, pc, (uptr)m);
1439   if (res == 0 || res == errno_EOWNERDEAD)
1440     MutexPostLock(thr, pc, (uptr)m);
1441   if (res == errno_EINVAL)
1442     MutexInvalidAccess(thr, pc, (uptr)m);
1443   return res;
1444 }
1445 
1446 TSAN_INTERCEPTOR(int, __pthread_mutex_unlock, void *m) {
1447   SCOPED_TSAN_INTERCEPTOR(__pthread_mutex_unlock, m);
1448   MutexUnlock(thr, pc, (uptr)m);
1449   int res = REAL(__pthread_mutex_unlock)(m);
1450   if (res == errno_EINVAL)
1451     MutexInvalidAccess(thr, pc, (uptr)m);
1452   return res;
1453 }
1454 #  endif
1455 #endif
1456 
1457 #if !SANITIZER_APPLE
1458 TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
1459   SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
1460   int res = REAL(pthread_spin_init)(m, pshared);
1461   if (res == 0) {
1462     MutexCreate(thr, pc, (uptr)m);
1463   }
1464   return res;
1465 }
1466 
1467 TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
1468   SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
1469   int res = REAL(pthread_spin_destroy)(m);
1470   if (res == 0) {
1471     MutexDestroy(thr, pc, (uptr)m);
1472   }
1473   return res;
1474 }
1475 
1476 TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
1477   SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
1478   MutexPreLock(thr, pc, (uptr)m);
1479   int res = BLOCK_REAL(pthread_spin_lock)(m);
1480   if (res == 0) {
1481     MutexPostLock(thr, pc, (uptr)m);
1482   }
1483   return res;
1484 }
1485 
1486 TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
1487   SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
1488   int res = REAL(pthread_spin_trylock)(m);
1489   if (res == 0) {
1490     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1491   }
1492   return res;
1493 }
1494 
1495 TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
1496   SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
1497   MutexUnlock(thr, pc, (uptr)m);
1498   int res = REAL(pthread_spin_unlock)(m);
1499   return res;
1500 }
1501 #endif
1502 
1503 TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
1504   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
1505   int res = REAL(pthread_rwlock_init)(m, a);
1506   if (res == 0) {
1507     MutexCreate(thr, pc, (uptr)m);
1508   }
1509   return res;
1510 }
1511 
1512 TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
1513   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
1514   int res = REAL(pthread_rwlock_destroy)(m);
1515   if (res == 0) {
1516     MutexDestroy(thr, pc, (uptr)m);
1517   }
1518   return res;
1519 }
1520 
1521 TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
1522   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
1523   MutexPreReadLock(thr, pc, (uptr)m);
1524   int res = REAL(pthread_rwlock_rdlock)(m);
1525   if (res == 0) {
1526     MutexPostReadLock(thr, pc, (uptr)m);
1527   }
1528   return res;
1529 }
1530 
1531 TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
1532   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
1533   int res = REAL(pthread_rwlock_tryrdlock)(m);
1534   if (res == 0) {
1535     MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
1536   }
1537   return res;
1538 }
1539 
1540 #if !SANITIZER_APPLE
1541 TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
1542   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
1543   int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
1544   if (res == 0) {
1545     MutexPostReadLock(thr, pc, (uptr)m);
1546   }
1547   return res;
1548 }
1549 #endif
1550 
1551 TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
1552   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
1553   MutexPreLock(thr, pc, (uptr)m);
1554   int res = BLOCK_REAL(pthread_rwlock_wrlock)(m);
1555   if (res == 0) {
1556     MutexPostLock(thr, pc, (uptr)m);
1557   }
1558   return res;
1559 }
1560 
1561 TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
1562   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
1563   int res = REAL(pthread_rwlock_trywrlock)(m);
1564   if (res == 0) {
1565     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1566   }
1567   return res;
1568 }
1569 
1570 #if !SANITIZER_APPLE
1571 TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
1572   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
1573   int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
1574   if (res == 0) {
1575     MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
1576   }
1577   return res;
1578 }
1579 #endif
1580 
1581 TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
1582   SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
1583   MutexReadOrWriteUnlock(thr, pc, (uptr)m);
1584   int res = REAL(pthread_rwlock_unlock)(m);
1585   return res;
1586 }
1587 
1588 #if !SANITIZER_APPLE
1589 TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
1590   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
1591   MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
1592   int res = REAL(pthread_barrier_init)(b, a, count);
1593   return res;
1594 }
1595 
1596 TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
1597   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
1598   MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
1599   int res = REAL(pthread_barrier_destroy)(b);
1600   return res;
1601 }
1602 
1603 TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
1604   SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
1605   Release(thr, pc, (uptr)b);
1606   MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
1607   int res = REAL(pthread_barrier_wait)(b);
1608   MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
1609   if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
1610     Acquire(thr, pc, (uptr)b);
1611   }
1612   return res;
1613 }
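// Illustrative sketch (hypothetical user code, not part of the runtime): the
// Release before the real pthread_barrier_wait() and the Acquire after it
// model the barrier's happens-before, so per-thread writes made before the
// wait are treated as published to every participant after it:
//
//   static pthread_barrier_t bar;   // assumed initialized for 2 threads
//   static int data[2];
//   void *worker(void *p) {
//     long id = (long)p;
//     data[id] = (int)id + 1;            // written before the barrier
//     pthread_barrier_wait(&bar);
//     return (void *)(long)(data[0] + data[1]);  // reads both without a race
//   }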
1614 #endif
1615 
1616 TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
1617   SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
1618   if (o == 0 || f == 0)
1619     return errno_EINVAL;
1620   atomic_uint32_t *a;
1621 
1622   if (SANITIZER_APPLE)
1623     a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
1624   else if (SANITIZER_NETBSD)
1625     a = static_cast<atomic_uint32_t*>
1626           ((void *)((char *)o + __sanitizer::pthread_mutex_t_sz));
1627   else
1628     a = static_cast<atomic_uint32_t*>(o);
1629 
1630   // Mac OS X appears to use pthread_once() in contexts where calling the
1631   // BlockingRegion hooks results in crashes due to too little stack space.
1632   if (guard_acquire(thr, pc, a, !SANITIZER_APPLE)) {
1633     (*f)();
1634     guard_release(thr, pc, a, kGuardDone);
1635   }
1636   return 0;
1637 }
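// Illustrative sketch (hypothetical user code, not part of the runtime): the
// guard word located above gives pthread_once() acquire/release semantics, so
// lazy initialization like the following is not flagged as a race on g_table:
//
//   static pthread_once_t once = PTHREAD_ONCE_INIT;
//   static int *g_table;
//   static void init_table() { g_table = new int[64]; }
//   int lookup(int i) {
//     pthread_once(&once, init_table);  // the initializer releases the guard,
//     return g_table[i];                // every caller acquires it
//   }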
1638 
1639 #if SANITIZER_GLIBC
1640 TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
1641   SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
1642   if (fd > 0)
1643     FdAccess(thr, pc, fd);
1644   return REAL(__fxstat)(version, fd, buf);
1645 }
1646 
1647 TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
1648   SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
1649   if (fd > 0)
1650     FdAccess(thr, pc, fd);
1651   return REAL(__fxstat64)(version, fd, buf);
1652 }
1653 #define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat); TSAN_INTERCEPT(__fxstat64)
1654 #else
1655 #define TSAN_MAYBE_INTERCEPT___FXSTAT
1656 #endif
1657 
1658 #if !SANITIZER_GLIBC || __GLIBC_PREREQ(2, 33)
1659 TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
1660   SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
1661   if (fd > 0)
1662     FdAccess(thr, pc, fd);
1663   return REAL(fstat)(fd, buf);
1664 }
1665 #  define TSAN_MAYBE_INTERCEPT_FSTAT TSAN_INTERCEPT(fstat)
1666 #else
1667 #  define TSAN_MAYBE_INTERCEPT_FSTAT
1668 #endif
1669 
1670 #if __GLIBC_PREREQ(2, 33)
1671 TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
1672   SCOPED_TSAN_INTERCEPTOR(fstat64, fd, buf);
1673   if (fd > 0)
1674     FdAccess(thr, pc, fd);
1675   return REAL(fstat64)(fd, buf);
1676 }
1677 #  define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
1678 #else
1679 #  define TSAN_MAYBE_INTERCEPT_FSTAT64
1680 #endif
1681 
1682 TSAN_INTERCEPTOR(int, open, const char *name, int oflag, ...) {
1683   mode_t mode = 0;
1684   if (OpenReadsVaArgs(oflag)) {
1685     va_list ap;
1686     va_start(ap, oflag);
1687     mode = va_arg(ap, int);
1688     va_end(ap);
1689   }
1690 
1691   SCOPED_TSAN_INTERCEPTOR(open, name, oflag, mode);
1692   READ_STRING(thr, pc, name, 0);
1693 
1694   int fd;
1695   if (OpenReadsVaArgs(oflag))
1696     fd = REAL(open)(name, oflag, mode);
1697   else
1698     fd = REAL(open)(name, oflag);
1699 
1700   if (fd >= 0)
1701     FdFileCreate(thr, pc, fd);
1702   return fd;
1703 }
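// Sketch of the calling convention handled above, assuming OpenReadsVaArgs()
// is true exactly for flags (such as O_CREAT) that make open() consume a
// mode_t argument:
//
//   int fd1 = open("/tmp/out.log", O_WRONLY | O_CREAT, 0644);  // mode is read
//   int fd2 = open("/etc/hosts", O_RDONLY);                    // no mode passed
//
// Reading va_arg unconditionally in the second case would fetch a garbage
// value, which is why the interceptor consults the va_list only when the
// flags require it.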
1704 
1705 #if SANITIZER_LINUX
1706 TSAN_INTERCEPTOR(int, open64, const char *name, int oflag, ...) {
1707   va_list ap;
1708   va_start(ap, oflag);
1709   mode_t mode = va_arg(ap, int);
1710   va_end(ap);
1711   SCOPED_TSAN_INTERCEPTOR(open64, name, oflag, mode);
1712   READ_STRING(thr, pc, name, 0);
1713   int fd = REAL(open64)(name, oflag, mode);
1714   if (fd >= 0)
1715     FdFileCreate(thr, pc, fd);
1716   return fd;
1717 }
1718 #define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
1719 #else
1720 #define TSAN_MAYBE_INTERCEPT_OPEN64
1721 #endif
1722 
1723 TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
1724   SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
1725   READ_STRING(thr, pc, name, 0);
1726   int fd = REAL(creat)(name, mode);
1727   if (fd >= 0)
1728     FdFileCreate(thr, pc, fd);
1729   return fd;
1730 }
1731 
1732 #if SANITIZER_LINUX
1733 TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
1734   SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
1735   READ_STRING(thr, pc, name, 0);
1736   int fd = REAL(creat64)(name, mode);
1737   if (fd >= 0)
1738     FdFileCreate(thr, pc, fd);
1739   return fd;
1740 }
1741 #define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
1742 #else
1743 #define TSAN_MAYBE_INTERCEPT_CREAT64
1744 #endif
1745 
1746 TSAN_INTERCEPTOR(int, dup, int oldfd) {
1747   SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
1748   int newfd = REAL(dup)(oldfd);
1749   if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
1750     FdDup(thr, pc, oldfd, newfd, true);
1751   return newfd;
1752 }
1753 
1754 TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
1755   SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
1756   int newfd2 = REAL(dup2)(oldfd, newfd);
1757   if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1758     FdDup(thr, pc, oldfd, newfd2, false);
1759   return newfd2;
1760 }
1761 
1762 #if !SANITIZER_APPLE
1763 TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
1764   SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
1765   int newfd2 = REAL(dup3)(oldfd, newfd, flags);
1766   if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
1767     FdDup(thr, pc, oldfd, newfd2, false);
1768   return newfd2;
1769 }
1770 #endif
1771 
1772 #if SANITIZER_LINUX
1773 TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
1774   SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
1775   int fd = REAL(eventfd)(initval, flags);
1776   if (fd >= 0)
1777     FdEventCreate(thr, pc, fd);
1778   return fd;
1779 }
1780 #define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
1781 #else
1782 #define TSAN_MAYBE_INTERCEPT_EVENTFD
1783 #endif
1784 
1785 #if SANITIZER_LINUX
1786 TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
1787   SCOPED_INTERCEPTOR_RAW(signalfd, fd, mask, flags);
1788   FdClose(thr, pc, fd);
1789   fd = REAL(signalfd)(fd, mask, flags);
1790   if (!MustIgnoreInterceptor(thr))
1791     FdSignalCreate(thr, pc, fd);
1792   return fd;
1793 }
1794 #define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
1795 #else
1796 #define TSAN_MAYBE_INTERCEPT_SIGNALFD
1797 #endif
1798 
1799 #if SANITIZER_LINUX
1800 TSAN_INTERCEPTOR(int, inotify_init, int fake) {
1801   SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
1802   int fd = REAL(inotify_init)(fake);
1803   if (fd >= 0)
1804     FdInotifyCreate(thr, pc, fd);
1805   return fd;
1806 }
1807 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
1808 #else
1809 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
1810 #endif
1811 
1812 #if SANITIZER_LINUX
1813 TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
1814   SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
1815   int fd = REAL(inotify_init1)(flags);
1816   if (fd >= 0)
1817     FdInotifyCreate(thr, pc, fd);
1818   return fd;
1819 }
1820 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
1821 #else
1822 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
1823 #endif
1824 
1825 TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
1826   SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
1827   int fd = REAL(socket)(domain, type, protocol);
1828   if (fd >= 0)
1829     FdSocketCreate(thr, pc, fd);
1830   return fd;
1831 }
1832 
1833 TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
1834   SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
1835   int res = REAL(socketpair)(domain, type, protocol, fd);
1836   if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
1837     FdPipeCreate(thr, pc, fd[0], fd[1]);
1838   return res;
1839 }
1840 
1841 TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
1842   SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
1843   FdSocketConnecting(thr, pc, fd);
1844   int res = REAL(connect)(fd, addr, addrlen);
1845   if (res == 0 && fd >= 0)
1846     FdSocketConnect(thr, pc, fd);
1847   return res;
1848 }
1849 
1850 TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
1851   SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
1852   int res = REAL(bind)(fd, addr, addrlen);
1853   if (fd > 0 && res == 0)
1854     FdAccess(thr, pc, fd);
1855   return res;
1856 }
1857 
1858 TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
1859   SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
1860   int res = REAL(listen)(fd, backlog);
1861   if (fd > 0 && res == 0)
1862     FdAccess(thr, pc, fd);
1863   return res;
1864 }
1865 
1866 TSAN_INTERCEPTOR(int, close, int fd) {
1867   SCOPED_INTERCEPTOR_RAW(close, fd);
1868   if (!in_symbolizer())
1869     FdClose(thr, pc, fd);
1870   return REAL(close)(fd);
1871 }
1872 
1873 #if SANITIZER_LINUX
1874 TSAN_INTERCEPTOR(int, __close, int fd) {
1875   SCOPED_INTERCEPTOR_RAW(__close, fd);
1876   FdClose(thr, pc, fd);
1877   return REAL(__close)(fd);
1878 }
1879 #define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
1880 #else
1881 #define TSAN_MAYBE_INTERCEPT___CLOSE
1882 #endif
1883 
1884 // glibc guts
1885 #if SANITIZER_LINUX && !SANITIZER_ANDROID
1886 TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
1887   SCOPED_INTERCEPTOR_RAW(__res_iclose, state, free_addr);
1888   int fds[64];
1889   int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
1890   for (int i = 0; i < cnt; i++) FdClose(thr, pc, fds[i]);
1891   REAL(__res_iclose)(state, free_addr);
1892 }
1893 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
1894 #else
1895 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
1896 #endif
1897 
1898 TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
1899   SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
1900   int res = REAL(pipe)(pipefd);
1901   if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1902     FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1903   return res;
1904 }
1905 
1906 #if !SANITIZER_APPLE
1907 TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
1908   SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
1909   int res = REAL(pipe2)(pipefd, flags);
1910   if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
1911     FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
1912   return res;
1913 }
1914 #endif
1915 
1916 TSAN_INTERCEPTOR(int, unlink, char *path) {
1917   SCOPED_TSAN_INTERCEPTOR(unlink, path);
1918   Release(thr, pc, File2addr(path));
1919   int res = REAL(unlink)(path);
1920   return res;
1921 }
1922 
1923 TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
1924   SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
1925   void *res = REAL(tmpfile)(fake);
1926   if (res) {
1927     int fd = fileno_unlocked(res);
1928     if (fd >= 0)
1929       FdFileCreate(thr, pc, fd);
1930   }
1931   return res;
1932 }
1933 
1934 #if SANITIZER_LINUX
1935 TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
1936   SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
1937   void *res = REAL(tmpfile64)(fake);
1938   if (res) {
1939     int fd = fileno_unlocked(res);
1940     if (fd >= 0)
1941       FdFileCreate(thr, pc, fd);
1942   }
1943   return res;
1944 }
1945 #define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
1946 #else
1947 #define TSAN_MAYBE_INTERCEPT_TMPFILE64
1948 #endif
1949 
1950 static void FlushStreams() {
1951   // Flushing all the streams here may freeze the process if a child thread is
1952   // performing file stream operations at the same time.
1953   REAL(fflush)(stdout);
1954   REAL(fflush)(stderr);
1955 }
1956 
1957 TSAN_INTERCEPTOR(void, abort, int fake) {
1958   SCOPED_TSAN_INTERCEPTOR(abort, fake);
1959   FlushStreams();
1960   REAL(abort)(fake);
1961 }
1962 
1963 TSAN_INTERCEPTOR(int, rmdir, char *path) {
1964   SCOPED_TSAN_INTERCEPTOR(rmdir, path);
1965   Release(thr, pc, Dir2addr(path));
1966   int res = REAL(rmdir)(path);
1967   return res;
1968 }
1969 
1970 TSAN_INTERCEPTOR(int, closedir, void *dirp) {
1971   SCOPED_INTERCEPTOR_RAW(closedir, dirp);
1972   if (dirp) {
1973     int fd = dirfd(dirp);
1974     FdClose(thr, pc, fd);
1975   }
1976   return REAL(closedir)(dirp);
1977 }
1978 
1979 #if SANITIZER_LINUX
1980 TSAN_INTERCEPTOR(int, epoll_create, int size) {
1981   SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
1982   int fd = REAL(epoll_create)(size);
1983   if (fd >= 0)
1984     FdPollCreate(thr, pc, fd);
1985   return fd;
1986 }
1987 
1988 TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
1989   SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
1990   int fd = REAL(epoll_create1)(flags);
1991   if (fd >= 0)
1992     FdPollCreate(thr, pc, fd);
1993   return fd;
1994 }
1995 
1996 TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
1997   SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
1998   if (epfd >= 0)
1999     FdAccess(thr, pc, epfd);
2000   if (epfd >= 0 && fd >= 0)
2001     FdAccess(thr, pc, fd);
2002   if (op == EPOLL_CTL_ADD && epfd >= 0) {
2003     FdPollAdd(thr, pc, epfd, fd);
2004     FdRelease(thr, pc, epfd);
2005   }
2006   int res = REAL(epoll_ctl)(epfd, op, fd, ev);
2007   return res;
2008 }
2009 
2010 TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
2011   SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
2012   if (epfd >= 0)
2013     FdAccess(thr, pc, epfd);
2014   int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
2015   if (res > 0 && epfd >= 0)
2016     FdAcquire(thr, pc, epfd);
2017   return res;
2018 }
2019 
2020 TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
2021                  void *sigmask) {
2022   SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask);
2023   if (epfd >= 0)
2024     FdAccess(thr, pc, epfd);
2025   int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask);
2026   if (res > 0 && epfd >= 0)
2027     FdAcquire(thr, pc, epfd);
2028   return res;
2029 }
2030 
2031 TSAN_INTERCEPTOR(int, epoll_pwait2, int epfd, void *ev, int cnt, void *timeout,
2032                  void *sigmask) {
2033   SCOPED_INTERCEPTOR_RAW(epoll_pwait2, epfd, ev, cnt, timeout, sigmask);
2034   // This function is new and may not be present in libc and/or kernel.
2035   // Since we effectively add it to libc (as it will be probed by the program
2036   // using dlsym or a weak function pointer), we need to handle the case
2037   // when it's not present in the actual libc.
2038   if (!REAL(epoll_pwait2)) {
2039     errno = errno_ENOSYS;
2040     return -1;
2041   }
2042   if (MustIgnoreInterceptor(thr))
2043     return REAL(epoll_pwait2)(epfd, ev, cnt, timeout, sigmask);
2044   if (epfd >= 0)
2045     FdAccess(thr, pc, epfd);
2046   int res = BLOCK_REAL(epoll_pwait2)(epfd, ev, cnt, timeout, sigmask);
2047   if (res > 0 && epfd >= 0)
2048     FdAcquire(thr, pc, epfd);
2049   return res;
2050 }
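// Illustrative sketch (hypothetical user code, not part of the runtime): a
// program probing for epoll_pwait2 at run time will find the interceptor even
// when libc lacks the symbol, so the ENOSYS fallback above keeps such probes
// well behaved:
//
//   typedef int (*epoll_pwait2_fn)(int, struct epoll_event *, int,
//                                  const struct timespec *, const sigset_t *);
//   auto fn = (epoll_pwait2_fn)dlsym(RTLD_DEFAULT, "epoll_pwait2");
//   if (fn && fn(epfd, events, maxevents, nullptr, nullptr) < 0 &&
//       errno == ENOSYS) {
//     /* fall back to epoll_pwait() */
//   }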
2051 
2052 #  define TSAN_MAYBE_INTERCEPT_EPOLL \
2053     TSAN_INTERCEPT(epoll_create);    \
2054     TSAN_INTERCEPT(epoll_create1);   \
2055     TSAN_INTERCEPT(epoll_ctl);       \
2056     TSAN_INTERCEPT(epoll_wait);      \
2057     TSAN_INTERCEPT(epoll_pwait);     \
2058     TSAN_INTERCEPT(epoll_pwait2)
2059 #else
2060 #define TSAN_MAYBE_INTERCEPT_EPOLL
2061 #endif
2062 
2063 // The following functions are intercepted merely to process pending signals.
2064 // If the program blocks signal X, we must deliver the signal before the
2065 // function returns. Similarly, if the program unblocks a signal (or returns
2066 // from sigsuspend), it's better to deliver the signal straight away.
2067 TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
2068   SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
2069   return REAL(sigsuspend)(mask);
2070 }
2071 
2072 TSAN_INTERCEPTOR(int, sigblock, int mask) {
2073   SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
2074   return REAL(sigblock)(mask);
2075 }
2076 
2077 TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
2078   SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
2079   return REAL(sigsetmask)(mask);
2080 }
2081 
2082 TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
2083     __sanitizer_sigset_t *oldset) {
2084   SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
2085   return REAL(pthread_sigmask)(how, set, oldset);
2086 }
2087 
2088 namespace __tsan {
2089 
2090 static void ReportErrnoSpoiling(ThreadState *thr, uptr pc, int sig) {
2091   VarSizeStackTrace stack;
2092   // StackTrace::GetNextInstructionPc(pc) is used because a return address is
2093   // expected; OutputReport() will undo this.
2094   ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
2095   ThreadRegistryLock l(&ctx->thread_registry);
2096   ScopedReport rep(ReportTypeErrnoInSignal);
2097   rep.SetSigNum(sig);
2098   if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
2099     rep.AddStack(stack, true);
2100     OutputReport(thr, rep);
2101   }
2102 }
2103 
2104 static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
2105                                   int sig, __sanitizer_siginfo *info,
2106                                   void *uctx) {
2107   CHECK(thr->slot);
2108   __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
2109   if (acquire)
2110     Acquire(thr, 0, (uptr)&sigactions[sig]);
2111   // Signals are generally asynchronous, so if we receive a signal when
2112   // ignores are enabled, we should disable ignores. This is critical for sync
2113   // and interceptors, because otherwise we can miss synchronization and report
2114   // false races.
2115   int ignore_reads_and_writes = thr->ignore_reads_and_writes;
2116   int ignore_interceptors = thr->ignore_interceptors;
2117   int ignore_sync = thr->ignore_sync;
2118   // For the symbolizer we only process SIGSEGVs synchronously
2119   // (a bug in the symbolizer or in tsan). But we want to reset
2120   // in_symbolizer to fail gracefully. The symbolizer and user code
2121   // use different memory allocators, so if we don't reset
2122   // in_symbolizer we can get memory allocated with one allocator being
2123   // freed with the other, which can cause more crashes.
2124   int in_symbolizer = thr->in_symbolizer;
2125   if (!ctx->after_multithreaded_fork) {
2126     thr->ignore_reads_and_writes = 0;
2127     thr->fast_state.ClearIgnoreBit();
2128     thr->ignore_interceptors = 0;
2129     thr->ignore_sync = 0;
2130     thr->in_symbolizer = 0;
2131   }
2132   // Ensure that the handler does not spoil errno.
2133   const int saved_errno = errno;
2134   errno = 99;
2135   // This code races with sigaction. Be careful to not read sa_sigaction twice.
2136   // Also need to remember pc for reporting before the call,
2137   // because the handler can reset it.
2138   volatile uptr pc = (sigactions[sig].sa_flags & SA_SIGINFO)
2139                          ? (uptr)sigactions[sig].sigaction
2140                          : (uptr)sigactions[sig].handler;
2141   if (pc != sig_dfl && pc != sig_ign) {
2142     // The callback can be either sa_handler or sa_sigaction.
2143     // They have different signatures, but we assume that passing
2144     // additional arguments to sa_handler works and is harmless.
2145     ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
2146   }
2147   if (!ctx->after_multithreaded_fork) {
2148     thr->ignore_reads_and_writes = ignore_reads_and_writes;
2149     if (ignore_reads_and_writes)
2150       thr->fast_state.SetIgnoreBit();
2151     thr->ignore_interceptors = ignore_interceptors;
2152     thr->ignore_sync = ignore_sync;
2153     thr->in_symbolizer = in_symbolizer;
2154   }
2155   // We do not detect errno spoiling for SIGTERM,
2156   // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
2157   // and tsan would report a false positive in such a case.
2158   // It's difficult to properly detect this situation (the reraise),
2159   // because in the async signal processing case (when the handler is called
2160   // directly from rtl_generic_sighandler) we have not yet received the reraised
2161   // signal; and it looks too fragile to intercept all ways to reraise a signal.
2162   if (ShouldReport(thr, ReportTypeErrnoInSignal) && !sync && sig != SIGTERM &&
2163       errno != 99)
2164     ReportErrnoSpoiling(thr, pc, sig);
2165   errno = saved_errno;
2166 }
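// Minimal example of the bug the errno == 99 sentinel above detects
// (hypothetical user code): a handler that lets a failing call overwrite
// errno without restoring it.
//
//   void on_sigusr1(int) {
//     unlink("/nonexistent");   // fails and sets errno = ENOENT
//   }                           // missing: save/restore of errno
//
// If this handler runs between a failing syscall and the interrupted thread's
// errno check, the program observes the wrong error code; tsan reports such
// cases as ReportTypeErrnoInSignal.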
2167 
2168 void ProcessPendingSignalsImpl(ThreadState *thr) {
2169   atomic_store(&thr->pending_signals, 0, memory_order_relaxed);
2170   ThreadSignalContext *sctx = SigCtx(thr);
2171   if (sctx == 0)
2172     return;
2173   atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
2174   internal_sigfillset(&sctx->emptyset);
2175   int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
2176   CHECK_EQ(res, 0);
2177   for (int sig = 0; sig < kSigCount; sig++) {
2178     SignalDesc *signal = &sctx->pending_signals[sig];
2179     if (signal->armed) {
2180       signal->armed = false;
2181       CallUserSignalHandler(thr, false, true, sig, &signal->siginfo,
2182                             &signal->ctx);
2183     }
2184   }
2185   res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
2186   CHECK_EQ(res, 0);
2187   atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
2188 }
2189 
2190 }  // namespace __tsan
2191 
2192 static bool is_sync_signal(ThreadSignalContext *sctx, int sig,
2193                            __sanitizer_siginfo *info) {
2194   // If we are sending signal to ourselves, we must process it now.
2195   if (sctx && sig == sctx->int_signal_send)
2196     return true;
2197 #if SANITIZER_HAS_SIGINFO
2198   // POSIX timers can be configured to send any kind of signal; however, it
2199   // doesn't make any sense to consider a timer signal as synchronous!
2200   if (info->si_code == SI_TIMER)
2201     return false;
2202 #endif
2203   return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || sig == SIGTRAP ||
2204          sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS;
2205 }
2206 
2207 void sighandler(int sig, __sanitizer_siginfo *info, void *ctx) {
2208   ThreadState *thr = cur_thread_init();
2209   ThreadSignalContext *sctx = SigCtx(thr);
2210   if (sig < 0 || sig >= kSigCount) {
2211     VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
2212     return;
2213   }
2214   // Don't mess with synchronous signals.
2215   const bool sync = is_sync_signal(sctx, sig, info);
2216   if (sync ||
2217       // If we are in blocking function, we can safely process it now
2218       // (but check if we are in a recursive interceptor,
2219       // i.e. pthread_join()->munmap()).
2220       atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
2221     atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
2222     if (atomic_load(&thr->in_blocking_func, memory_order_relaxed)) {
2223       atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
2224       CallUserSignalHandler(thr, sync, true, sig, info, ctx);
2225       atomic_store(&thr->in_blocking_func, 1, memory_order_relaxed);
2226     } else {
2227       // Be very conservative with when we do acquire in this case.
2228       // It's unsafe to do acquire in async handlers, because ThreadState
2229       // can be in an inconsistent state.
2230       // SIGSYS looks relatively safe -- it's synchronous and can actually
2231       // need some global state.
2232       bool acq = (sig == SIGSYS);
2233       CallUserSignalHandler(thr, sync, acq, sig, info, ctx);
2234     }
2235     atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
2236     return;
2237   }
2238 
2239   if (sctx == 0)
2240     return;
2241   SignalDesc *signal = &sctx->pending_signals[sig];
2242   if (signal->armed == false) {
2243     signal->armed = true;
2244     internal_memcpy(&signal->siginfo, info, sizeof(*info));
2245     internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
2246     atomic_store(&thr->pending_signals, 1, memory_order_relaxed);
2247   }
2248 }
2249 
2250 TSAN_INTERCEPTOR(int, raise, int sig) {
2251   SCOPED_TSAN_INTERCEPTOR(raise, sig);
2252   ThreadSignalContext *sctx = SigCtx(thr);
2253   CHECK_NE(sctx, 0);
2254   int prev = sctx->int_signal_send;
2255   sctx->int_signal_send = sig;
2256   int res = REAL(raise)(sig);
2257   CHECK_EQ(sctx->int_signal_send, sig);
2258   sctx->int_signal_send = prev;
2259   return res;
2260 }
2261 
2262 TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
2263   SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
2264   ThreadSignalContext *sctx = SigCtx(thr);
2265   CHECK_NE(sctx, 0);
2266   int prev = sctx->int_signal_send;
2267   if (pid == (int)internal_getpid()) {
2268     sctx->int_signal_send = sig;
2269   }
2270   int res = REAL(kill)(pid, sig);
2271   if (pid == (int)internal_getpid()) {
2272     CHECK_EQ(sctx->int_signal_send, sig);
2273     sctx->int_signal_send = prev;
2274   }
2275   return res;
2276 }
2277 
2278 TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
2279   SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
2280   ThreadSignalContext *sctx = SigCtx(thr);
2281   CHECK_NE(sctx, 0);
2282   int prev = sctx->int_signal_send;
2283   bool self = pthread_equal(tid, pthread_self());
2284   if (self)
2285     sctx->int_signal_send = sig;
2286   int res = REAL(pthread_kill)(tid, sig);
2287   if (self) {
2288     CHECK_EQ(sctx->int_signal_send, sig);
2289     sctx->int_signal_send = prev;
2290   }
2291   return res;
2292 }
2293 
2294 TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
2295   SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz);
2296   // It's intercepted merely to process pending signals.
2297   return REAL(gettimeofday)(tv, tz);
2298 }
2299 
2300 TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
2301     void *hints, void *rv) {
2302   SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
2303   // We miss atomic synchronization in getaddrinfo,
2304   // and can report a false race between malloc and free
2305   // inside of getaddrinfo. So ignore memory accesses.
2306   ThreadIgnoreBegin(thr, pc);
2307   int res = REAL(getaddrinfo)(node, service, hints, rv);
2308   ThreadIgnoreEnd(thr);
2309   return res;
2310 }
2311 
2312 TSAN_INTERCEPTOR(int, fork, int fake) {
2313   if (in_symbolizer())
2314     return REAL(fork)(fake);
2315   SCOPED_INTERCEPTOR_RAW(fork, fake);
2316   return REAL(fork)(fake);
2317 }
2318 
2319 void atfork_prepare() {
2320   if (in_symbolizer())
2321     return;
2322   ThreadState *thr = cur_thread();
2323   const uptr pc = StackTrace::GetCurrentPc();
2324   ForkBefore(thr, pc);
2325 }
2326 
2327 void atfork_parent() {
2328   if (in_symbolizer())
2329     return;
2330   ThreadState *thr = cur_thread();
2331   const uptr pc = StackTrace::GetCurrentPc();
2332   ForkParentAfter(thr, pc);
2333 }
2334 
2335 void atfork_child() {
2336   if (in_symbolizer())
2337     return;
2338   ThreadState *thr = cur_thread();
2339   const uptr pc = StackTrace::GetCurrentPc();
2340   ForkChildAfter(thr, pc, true);
2341   FdOnFork(thr, pc);
2342 }
2343 
2344 #if !SANITIZER_IOS
2345 TSAN_INTERCEPTOR(int, vfork, int fake) {
2346   // Some programs (e.g. openjdk) call close for all file descriptors
2347   // in the child process. Under tsan this leads to false positives: the
2348   // address space is shared, so the parent process also thinks that
2349   // the descriptors are closed (while they are actually not), and the
2350   // resulting missed synchronization produces false race reports.
2351   // Strictly speaking, this is undefined behavior, because the vfork child is
2352   // not allowed to call any functions other than exec/exit. But this is what
2353   // openjdk does, so we want to handle it.
2354   // We could disable interceptors in the child process. But it's not possible
2355   // to simply intercept and wrap vfork, because the vfork child is not allowed
2356   // to return from the function that calls vfork, and that's exactly what
2357   // we would do. So this would require some assembly trickery as well.
2358   // Instead we simply turn vfork into fork.
2359   return WRAP(fork)(fake);
2360 }
2361 #endif
2362 
2363 #if SANITIZER_LINUX
2364 TSAN_INTERCEPTOR(int, clone, int (*fn)(void *), void *stack, int flags,
2365                  void *arg, int *parent_tid, void *tls, pid_t *child_tid) {
2366   SCOPED_INTERCEPTOR_RAW(clone, fn, stack, flags, arg, parent_tid, tls,
2367                          child_tid);
2368   struct Arg {
2369     int (*fn)(void *);
2370     void *arg;
2371   };
2372   auto wrapper = +[](void *p) -> int {
2373     auto *thr = cur_thread();
2374     uptr pc = GET_CURRENT_PC();
2375     // Start the background thread for fork, but not for clone.
2376     // For fork we have always done this and it's known to work (or user code
2377     // has adapted). But if we do this for the new clone interceptor, some code
2378     // (sandbox2) fails. So keep the model we used for years and don't start the
2379     // background thread after clone.
2380     ForkChildAfter(thr, pc, false);
2381     FdOnFork(thr, pc);
2382     auto *arg = static_cast<Arg *>(p);
2383     return arg->fn(arg->arg);
2384   };
2385   ForkBefore(thr, pc);
2386   Arg arg_wrapper = {fn, arg};
2387   int pid = REAL(clone)(wrapper, stack, flags, &arg_wrapper, parent_tid, tls,
2388                         child_tid);
2389   ForkParentAfter(thr, pc);
2390   return pid;
2391 }
2392 #endif
2393 
2394 #if !SANITIZER_APPLE && !SANITIZER_ANDROID
2395 typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
2396                                     void *data);
2397 struct dl_iterate_phdr_data {
2398   ThreadState *thr;
2399   uptr pc;
2400   dl_iterate_phdr_cb_t cb;
2401   void *data;
2402 };
2403 
2404 static bool IsAppNotRodata(uptr addr) {
2405   return IsAppMem(addr) && *MemToShadow(addr) != Shadow::kRodata;
2406 }
2407 
2408 static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
2409                               void *data) {
2410   dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
2411   // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is later
2412   // accessible in the dl_iterate_phdr callback. But we don't see synchronization
2413   // inside of the dynamic linker, so we "unpoison" it here in order not to
2414   // produce false reports. Ignoring malloc/free in dlopen/dlclose is not enough,
2415   // because some libc functions call __libc_dlopen.
2416   if (info && IsAppNotRodata((uptr)info->dlpi_name))
2417     MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2418                      internal_strlen(info->dlpi_name));
2419   int res = cbdata->cb(info, size, cbdata->data);
2420   // Perform the check one more time in case info->dlpi_name was overwritten
2421   // by user callback.
2422   if (info && IsAppNotRodata((uptr)info->dlpi_name))
2423     MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2424                      internal_strlen(info->dlpi_name));
2425   return res;
2426 }
2427 
2428 TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) {
2429   SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data);
2430   dl_iterate_phdr_data cbdata;
2431   cbdata.thr = thr;
2432   cbdata.pc = pc;
2433   cbdata.cb = cb;
2434   cbdata.data = data;
2435   int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata);
2436   return res;
2437 }
2438 #endif
2439 
2440 static int OnExit(ThreadState *thr) {
2441   int status = Finalize(thr);
2442   FlushStreams();
2443   return status;
2444 }
2445 
2446 #if !SANITIZER_APPLE
2447 static void HandleRecvmsg(ThreadState *thr, uptr pc,
2448     __sanitizer_msghdr *msg) {
2449   int fds[64];
2450   int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
2451   for (int i = 0; i < cnt; i++)
2452     FdEventCreate(thr, pc, fds[i]);
2453 }
2454 #endif
2455 
2456 #include "sanitizer_common/sanitizer_platform_interceptors.h"
2457 // Causes interceptor recursion (getaddrinfo() and fopen())
2458 #undef SANITIZER_INTERCEPT_GETADDRINFO
2459 // We define our own.
2460 #if SANITIZER_INTERCEPT_TLS_GET_ADDR
2461 #define NEED_TLS_GET_ADDR
2462 #endif
2463 #undef SANITIZER_INTERCEPT_TLS_GET_ADDR
2464 #define SANITIZER_INTERCEPT_TLS_GET_OFFSET 1
2465 #undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK
2466 
2467 #define COMMON_INTERCEPT_FUNCTION_VER(name, ver)                          \
2468   INTERCEPT_FUNCTION_VER(name, ver)
2469 #define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \
2470   (INTERCEPT_FUNCTION_VER(name, ver) || INTERCEPT_FUNCTION(name))
2471 
2472 #define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
2473   SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__);              \
2474   TsanInterceptorContext _ctx = {thr, pc};                \
2475   ctx = (void *)&_ctx;                                    \
2476   (void)ctx;
2477 
2478 #define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
2479   if (path)                                           \
2480     Acquire(thr, pc, File2addr(path));                \
2481   if (file) {                                         \
2482     int fd = fileno_unlocked(file);                   \
2483     if (fd >= 0) FdFileCreate(thr, pc, fd);           \
2484   }
2485 
2486 #define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
2487   if (file) {                                    \
2488     int fd = fileno_unlocked(file);              \
2489     FdClose(thr, pc, fd);                        \
2490   }
2491 
2492 #define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \
2493   ({                                              \
2494     CheckNoDeepBind(filename, flag);              \
2495     ThreadIgnoreBegin(thr, 0);                    \
2496     void *res = REAL(dlopen)(filename, flag);     \
2497     ThreadIgnoreEnd(thr);                         \
2498     res;                                          \
2499   })
2500 
2501 // Ignore interceptors in OnLibraryLoaded()/Unloaded(). These hooks use code
2502 // (ListOfModules::init, MemoryMappingLayout::DumpListOfModules) that makes
2503 // intercepted calls, which can cause deadlocks with ReportRace(), which also
2504 // uses this code.
2505 #define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
2506   ({                                                        \
2507     ScopedIgnoreInterceptors ignore_interceptors;           \
2508     libignore()->OnLibraryLoaded(filename);                 \
2509   })
2510 
2511 #define COMMON_INTERCEPTOR_LIBRARY_UNLOADED()     \
2512   ({                                              \
2513     ScopedIgnoreInterceptors ignore_interceptors; \
2514     libignore()->OnLibraryUnloaded();             \
2515   })
2516 
2517 #define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
2518   Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)
2519 
2520 #define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
2521   Release(((TsanInterceptorContext *) ctx)->thr, pc, u)
2522 
2523 #define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
2524   Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))
2525 
2526 #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
2527   FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2528 
2529 #define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
2530   FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2531 
2532 #define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
2533   FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd)
2534 
2535 #define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
2536   FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd)
2537 
2538 #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
2539   ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
2540 
2541 #define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name)         \
2542   if (pthread_equal(pthread_self(), reinterpret_cast<void *>(thread))) \
2543     COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name);                     \
2544   else                                                                 \
2545     __tsan::ctx->thread_registry.SetThreadNameByUserId(thread, name)
2546 
2547 #define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
2548 
2549 #define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
2550   OnExit(((TsanInterceptorContext *) ctx)->thr)
2551 
2552 #define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd,  \
2553                                      off)                                   \
2554   do {                                                                      \
2555     return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd, \
2556                             off);                                           \
2557   } while (false)
2558 
2559 #define COMMON_INTERCEPTOR_MUNMAP_IMPL(ctx, addr, sz)           \
2560   do {                                                          \
2561     return munmap_interceptor(thr, pc, REAL(munmap), addr, sz); \
2562   } while (false)
2563 
2564 #if !SANITIZER_APPLE
2565 #define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
2566   HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
2567       ((TsanInterceptorContext *)ctx)->pc, msg)
2568 #endif
2569 
2570 #define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end)                           \
2571   if (TsanThread *t = GetCurrentThread()) {                                    \
2572     *begin = t->tls_begin();                                                   \
2573     *end = t->tls_end();                                                       \
2574   } else {                                                                     \
2575     *begin = *end = 0;                                                         \
2576   }
2577 
2578 #define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
2579   SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()
2580 
2581 #define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
2582   SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()
2583 
2584 #include "sanitizer_common/sanitizer_common_interceptors.inc"
2585 
2586 static int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2587                           __sanitizer_sigaction *old);
2588 static __sanitizer_sighandler_ptr signal_impl(int sig,
2589                                               __sanitizer_sighandler_ptr h);
2590 
2591 #define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signo, act, oldact) \
2592   { return sigaction_impl(signo, act, oldact); }
2593 
2594 #define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
2595   { return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); }
2596 
2597 #define SIGNAL_INTERCEPTOR_ENTER() LazyInitialize(cur_thread_init())
2598 
2599 #include "sanitizer_common/sanitizer_signal_interceptors.inc"
2600 
2601 int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2602                    __sanitizer_sigaction *old) {
2603   // Note: if we call REAL(sigaction) directly for any reason without proxying
2604   // the signal handler through sighandler, very bad things will happen.
2605   // The handler will run synchronously and corrupt tsan per-thread state.
2606   SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
2607   if (sig <= 0 || sig >= kSigCount) {
2608     errno = errno_EINVAL;
2609     return -1;
2610   }
2611   __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
2612   __sanitizer_sigaction old_stored;
2613   if (old) internal_memcpy(&old_stored, &sigactions[sig], sizeof(old_stored));
2614   __sanitizer_sigaction newact;
2615   if (act) {
2616     // Copy act into sigactions[sig].
2617     // Can't use a struct copy, because the compiler can emit a call to memcpy.
2618     // Can't use internal_memcpy, because it copies byte-by-byte,
2619     // and the signal handler reads the handler concurrently. It could read
2620     // some bytes from the old value and some bytes from the new value.
2621     // Use volatile to prevent insertion of memcpy.
2622     sigactions[sig].handler =
2623         *(volatile __sanitizer_sighandler_ptr const *)&act->handler;
2624     sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags;
2625     internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
2626                     sizeof(sigactions[sig].sa_mask));
2627 #if !SANITIZER_FREEBSD && !SANITIZER_APPLE && !SANITIZER_NETBSD
2628     sigactions[sig].sa_restorer = act->sa_restorer;
2629 #endif
2630     internal_memcpy(&newact, act, sizeof(newact));
2631     internal_sigfillset(&newact.sa_mask);
2632     if ((act->sa_flags & SA_SIGINFO) ||
2633         ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl)) {
2634       newact.sa_flags |= SA_SIGINFO;
2635       newact.sigaction = sighandler;
2636     }
2637     ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
2638     act = &newact;
2639   }
2640   int res = REAL(sigaction)(sig, act, old);
2641   if (res == 0 && old && old->sigaction == sighandler)
2642     internal_memcpy(old, &old_stored, sizeof(*old));
2643   return res;
2644 }
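// Sketch of the effective rewrite performed above (illustrative, hypothetical
// user code): a call such as
//
//   struct sigaction sa = {};
//   sa.sa_handler = on_sigusr1;          // plain single-argument handler
//   sigaction(SIGUSR1, &sa, nullptr);
//
// stores the original handler in interceptor_ctx()->sigactions[SIGUSR1] and
// installs {.sa_sigaction = sighandler, .sa_flags |= SA_SIGINFO, full sa_mask}
// in the kernel, so every delivery is routed through tsan's sighandler, which
// then invokes the stored user handler.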
2645 
2646 static __sanitizer_sighandler_ptr signal_impl(int sig,
2647                                               __sanitizer_sighandler_ptr h) {
2648   __sanitizer_sigaction act;
2649   act.handler = h;
2650   internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
2651   act.sa_flags = 0;
2652   __sanitizer_sigaction old;
2653   int res = sigaction_symname(sig, &act, &old);
2654   if (res) return (__sanitizer_sighandler_ptr)sig_err;
2655   return old.handler;
2656 }
2657 
2658 #define TSAN_SYSCALL()             \
2659   ThreadState *thr = cur_thread(); \
2660   if (thr->ignore_interceptors)    \
2661     return;                        \
2662   ScopedSyscall scoped_syscall(thr)
2663 
2664 struct ScopedSyscall {
2665   ThreadState *thr;
2666 
2667   explicit ScopedSyscall(ThreadState *thr) : thr(thr) { LazyInitialize(thr); }
2668 
2669   ~ScopedSyscall() {
2670     ProcessPendingSignals(thr);
2671   }
2672 };
2673 
2674 #if !SANITIZER_FREEBSD && !SANITIZER_APPLE
2675 static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
2676   TSAN_SYSCALL();
2677   MemoryAccessRange(thr, pc, p, s, write);
2678 }
2679 
2680 static USED void syscall_acquire(uptr pc, uptr addr) {
2681   TSAN_SYSCALL();
2682   Acquire(thr, pc, addr);
2683   DPrintf("syscall_acquire(0x%zx))\n", addr);
2684 }
2685 
2686 static USED void syscall_release(uptr pc, uptr addr) {
2687   TSAN_SYSCALL();
2688   DPrintf("syscall_release(0x%zx)\n", addr);
2689   Release(thr, pc, addr);
2690 }
2691 
2692 static void syscall_fd_close(uptr pc, int fd) {
2693   auto *thr = cur_thread();
2694   FdClose(thr, pc, fd);
2695 }
2696 
2697 static USED void syscall_fd_acquire(uptr pc, int fd) {
2698   TSAN_SYSCALL();
2699   FdAcquire(thr, pc, fd);
2700   DPrintf("syscall_fd_acquire(%d)\n", fd);
2701 }
2702 
2703 static USED void syscall_fd_release(uptr pc, int fd) {
2704   TSAN_SYSCALL();
2705   DPrintf("syscall_fd_release(%d)\n", fd);
2706   FdRelease(thr, pc, fd);
2707 }
2708 
2709 static USED void sycall_blocking_start() {
2710   DPrintf("sycall_blocking_start()\n");
2711   ThreadState *thr = cur_thread();
2712   EnterBlockingFunc(thr);
2713   // When we are in a "blocking call", we process signals asynchronously
2714   // (right when they arrive). In this context we do not expect to be
2715   // executing any user/runtime code. The known interceptor sequence when
2716   // this is not true is: pthread_join -> munmap(stack). It's fine
2717   // to ignore munmap in this case -- we handle stack shadow separately.
2718   thr->ignore_interceptors++;
2719 }
2720 
2721 static USED void sycall_blocking_end() {
2722   DPrintf("sycall_blocking_end()\n");
2723   ThreadState *thr = cur_thread();
2724   thr->ignore_interceptors--;
2725   atomic_store(&thr->in_blocking_func, 0, memory_order_relaxed);
2726 }
2727 
2728 static void syscall_pre_fork(uptr pc) { ForkBefore(cur_thread(), pc); }
2729 
2730 static void syscall_post_fork(uptr pc, int pid) {
2731   ThreadState *thr = cur_thread();
2732   if (pid == 0) {
2733     // child
2734     ForkChildAfter(thr, pc, true);
2735     FdOnFork(thr, pc);
2736   } else if (pid > 0) {
2737     // parent
2738     ForkParentAfter(thr, pc);
2739   } else {
2740     // error
2741     ForkParentAfter(thr, pc);
2742   }
2743 }
2744 #endif
2745 
2746 #define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
2747   syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)
2748 
2749 #define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
2750   syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)
2751 
2752 #define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
2753   do {                                       \
2754     (void)(p);                               \
2755     (void)(s);                               \
2756   } while (false)
2757 
2758 #define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
2759   do {                                        \
2760     (void)(p);                                \
2761     (void)(s);                                \
2762   } while (false)
2763 
2764 #define COMMON_SYSCALL_ACQUIRE(addr) \
2765     syscall_acquire(GET_CALLER_PC(), (uptr)(addr))
2766 
2767 #define COMMON_SYSCALL_RELEASE(addr) \
2768     syscall_release(GET_CALLER_PC(), (uptr)(addr))
2769 
2770 #define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)
2771 
2772 #define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)
2773 
2774 #define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)
2775 
2776 #define COMMON_SYSCALL_PRE_FORK() \
2777   syscall_pre_fork(GET_CALLER_PC())
2778 
2779 #define COMMON_SYSCALL_POST_FORK(res) \
2780   syscall_post_fork(GET_CALLER_PC(), res)
2781 
2782 #define COMMON_SYSCALL_BLOCKING_START() sycall_blocking_start()
2783 #define COMMON_SYSCALL_BLOCKING_END() sycall_blocking_end()
2784 
2785 #include "sanitizer_common/sanitizer_common_syscalls.inc"
2786 #include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
2787 
2788 #ifdef NEED_TLS_GET_ADDR
2789 
2790 static void handle_tls_addr(void *arg, void *res) {
2791   ThreadState *thr = cur_thread();
2792   if (!thr)
2793     return;
2794   DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr,
2795                                         thr->tls_addr + thr->tls_size);
2796   if (!dtv)
2797     return;
2798   // New DTLS block has been allocated.
2799   MemoryResetRange(thr, 0, dtv->beg, dtv->size);
2800 }
2801 
2802 #if !SANITIZER_S390
2803 // Define our own interceptor instead of sanitizer_common's for three reasons:
2804 // 1. It must not process pending signals.
2805 //    Signal handlers may contain the MOVDQA instruction (see below).
2806 // 2. It must be as simple as possible so as not to contain MOVDQA itself.
2807 // 3. The sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE,
2808 //    which is empty for tsan (meant only for msan).
2809 // Note: __tls_get_addr can be called with a mis-aligned stack due to:
2810 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
2811 // So the interceptor must work with a mis-aligned stack; in particular, it must
2812 // not execute MOVDQA with stack addresses.
2813 TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
2814   void *res = REAL(__tls_get_addr)(arg);
2815   handle_tls_addr(arg, res);
2816   return res;
2817 }
2818 #else // SANITIZER_S390
2819 TSAN_INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
2820   uptr res = __tls_get_offset_wrapper(arg, REAL(__tls_get_offset));
2821   char *tp = static_cast<char *>(__builtin_thread_pointer());
2822   handle_tls_addr(arg, res + tp);
2823   return res;
2824 }
2825 #endif
2826 #endif
2827 
2828 #if SANITIZER_NETBSD
2829 TSAN_INTERCEPTOR(void, _lwp_exit) {
2830   SCOPED_TSAN_INTERCEPTOR(_lwp_exit);
2831   DestroyThreadState();
2832   REAL(_lwp_exit)();
2833 }
2834 #define TSAN_MAYBE_INTERCEPT__LWP_EXIT TSAN_INTERCEPT(_lwp_exit)
2835 #else
2836 #define TSAN_MAYBE_INTERCEPT__LWP_EXIT
2837 #endif
2838 
2839 #if SANITIZER_FREEBSD
2840 TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) {
2841   SCOPED_TSAN_INTERCEPTOR(thr_exit, state);
2842   DestroyThreadState();
2843   REAL(thr_exit)(state);
2844 }
2845 #define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit)
2846 #else
2847 #define TSAN_MAYBE_INTERCEPT_THR_EXIT
2848 #endif
2849 
2850 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_init, void *c, void *a)
2851 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_destroy, void *c)
2852 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_signal, void *c)
2853 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_broadcast, void *c)
2854 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_wait, void *c, void *m)
2855 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_init, void *m, void *a)
2856 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_destroy, void *m)
2857 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_lock, void *m)
2858 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_trylock, void *m)
2859 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_unlock, void *m)
2860 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_init, void *l, void *a)
2861 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_destroy, void *l)
2862 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_rdlock, void *l)
2863 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_tryrdlock, void *l)
2864 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_wrlock, void *l)
2865 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_trywrlock, void *l)
2866 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_unlock, void *l)
2867 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, once, void *o, void (*i)())
2868 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, sigmask, int f, void *n, void *o)
2869 
2870 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a)
2871 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c)
2872 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c)
2873 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m)
2874 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c)
2875 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a)
2876 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m)
2877 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_lock, void *m)
2878 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m)
2879 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_unlock, void *m)
2880 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a)
2881 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m)
2882 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m)
2883 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m)
2884 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m)
2885 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m)
2886 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m)
2887 TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)())
2888 TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(int, sigsetmask, sigmask, int a, void *b,
2889   void *c)
2890 
2891 namespace __tsan {
2892 
2893 static void finalize(void *arg) {
2894   ThreadState *thr = cur_thread();
2895   int status = Finalize(thr);
2896   // Make sure the output is not lost.
2897   FlushStreams();
2898   if (status)
2899     Die();
2900 }
2901 
2902 #if !SANITIZER_APPLE && !SANITIZER_ANDROID
2903 static void unreachable() {
2904   Report("FATAL: ThreadSanitizer: unreachable called\n");
2905   Die();
2906 }
2907 #endif
2908 
2909 // Define a default implementation since interception of libdispatch is optional.
2910 SANITIZER_WEAK_ATTRIBUTE void InitializeLibdispatchInterceptors() {}
2911 
2912 void InitializeInterceptors() {
2913 #if !SANITIZER_APPLE
2914   // We need to set it up early, because functions like dlsym() can call it.
2915   REAL(memset) = internal_memset;
2916   REAL(memcpy) = internal_memcpy;
2917 #endif
2918 
2919   __interception::DoesNotSupportStaticLinking();
2920 
2921   new(interceptor_ctx()) InterceptorContext();
2922 
2923   // Interpose __tls_get_addr before the common interposers. This is needed
2924   // because dlsym() may call malloc on failure which could result in other
2925   // interposed functions being called that could eventually make use of TLS.
2926 #ifdef NEED_TLS_GET_ADDR
2927 #  if !SANITIZER_S390
2928   TSAN_INTERCEPT(__tls_get_addr);
2929 #  else
2930   TSAN_INTERCEPT(__tls_get_addr_internal);
2931   TSAN_INTERCEPT(__tls_get_offset);
2932 #  endif
2933 #endif
2934   InitializeCommonInterceptors();
2935   InitializeSignalInterceptors();
2936   InitializeLibdispatchInterceptors();
2937 
2938 #if !SANITIZER_APPLE
2939   InitializeSetjmpInterceptors();
2940 #endif
2941 
2942   TSAN_INTERCEPT(longjmp_symname);
2943   TSAN_INTERCEPT(siglongjmp_symname);
2944 #if SANITIZER_NETBSD
2945   TSAN_INTERCEPT(_longjmp);
2946 #endif
2947 
2948   TSAN_INTERCEPT(malloc);
2949   TSAN_INTERCEPT(__libc_memalign);
2950   TSAN_INTERCEPT(calloc);
2951   TSAN_INTERCEPT(realloc);
2952   TSAN_INTERCEPT(reallocarray);
2953   TSAN_INTERCEPT(free);
2954   TSAN_INTERCEPT(cfree);
2955   TSAN_INTERCEPT(munmap);
2956   TSAN_MAYBE_INTERCEPT_MEMALIGN;
2957   TSAN_INTERCEPT(valloc);
2958   TSAN_MAYBE_INTERCEPT_PVALLOC;
2959   TSAN_INTERCEPT(posix_memalign);
2960 
2961   TSAN_INTERCEPT(strcpy);
2962   TSAN_INTERCEPT(strncpy);
2963   TSAN_INTERCEPT(strdup);
2964 
2965   TSAN_INTERCEPT(pthread_create);
2966   TSAN_INTERCEPT(pthread_join);
2967   TSAN_INTERCEPT(pthread_detach);
2968   TSAN_INTERCEPT(pthread_exit);
2969   #if SANITIZER_LINUX
2970   TSAN_INTERCEPT(pthread_tryjoin_np);
2971   TSAN_INTERCEPT(pthread_timedjoin_np);
2972   #endif
2973 
2974   TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE);
2975   TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE);
2976   TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE);
2977   TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE);
2978   TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
2979   TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);
2980 
2981   TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT;
2982 
2983   TSAN_INTERCEPT(pthread_mutex_init);
2984   TSAN_INTERCEPT(pthread_mutex_destroy);
2985   TSAN_INTERCEPT(pthread_mutex_lock);
2986   TSAN_INTERCEPT(pthread_mutex_trylock);
2987   TSAN_INTERCEPT(pthread_mutex_timedlock);
2988   TSAN_INTERCEPT(pthread_mutex_unlock);
2989 #if SANITIZER_LINUX
2990   TSAN_INTERCEPT(pthread_mutex_clocklock);
2991 #endif
2992 #if SANITIZER_GLIBC
2993 #  if !__GLIBC_PREREQ(2, 34)
2994   TSAN_INTERCEPT(__pthread_mutex_lock);
2995   TSAN_INTERCEPT(__pthread_mutex_unlock);
2996 #  endif
2997 #endif
2998 
2999   TSAN_INTERCEPT(pthread_spin_init);
3000   TSAN_INTERCEPT(pthread_spin_destroy);
3001   TSAN_INTERCEPT(pthread_spin_lock);
3002   TSAN_INTERCEPT(pthread_spin_trylock);
3003   TSAN_INTERCEPT(pthread_spin_unlock);
3004 
3005   TSAN_INTERCEPT(pthread_rwlock_init);
3006   TSAN_INTERCEPT(pthread_rwlock_destroy);
3007   TSAN_INTERCEPT(pthread_rwlock_rdlock);
3008   TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
3009   TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
3010   TSAN_INTERCEPT(pthread_rwlock_wrlock);
3011   TSAN_INTERCEPT(pthread_rwlock_trywrlock);
3012   TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
3013   TSAN_INTERCEPT(pthread_rwlock_unlock);
3014 
3015   TSAN_INTERCEPT(pthread_barrier_init);
3016   TSAN_INTERCEPT(pthread_barrier_destroy);
3017   TSAN_INTERCEPT(pthread_barrier_wait);
3018 
3019   TSAN_INTERCEPT(pthread_once);
3020 
3021   TSAN_MAYBE_INTERCEPT___FXSTAT;
3022   TSAN_MAYBE_INTERCEPT_FSTAT;
3023   TSAN_MAYBE_INTERCEPT_FSTAT64;
3024   TSAN_INTERCEPT(open);
3025   TSAN_MAYBE_INTERCEPT_OPEN64;
3026   TSAN_INTERCEPT(creat);
3027   TSAN_MAYBE_INTERCEPT_CREAT64;
3028   TSAN_INTERCEPT(dup);
3029   TSAN_INTERCEPT(dup2);
3030   TSAN_INTERCEPT(dup3);
3031   TSAN_MAYBE_INTERCEPT_EVENTFD;
3032   TSAN_MAYBE_INTERCEPT_SIGNALFD;
3033   TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
3034   TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
3035   TSAN_INTERCEPT(socket);
3036   TSAN_INTERCEPT(socketpair);
3037   TSAN_INTERCEPT(connect);
3038   TSAN_INTERCEPT(bind);
3039   TSAN_INTERCEPT(listen);
3040   TSAN_MAYBE_INTERCEPT_EPOLL;
3041   TSAN_INTERCEPT(close);
3042   TSAN_MAYBE_INTERCEPT___CLOSE;
3043   TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
3044   TSAN_INTERCEPT(pipe);
3045   TSAN_INTERCEPT(pipe2);
3046 
3047   TSAN_INTERCEPT(unlink);
3048   TSAN_INTERCEPT(tmpfile);
3049   TSAN_MAYBE_INTERCEPT_TMPFILE64;
3050   TSAN_INTERCEPT(abort);
3051   TSAN_INTERCEPT(rmdir);
3052   TSAN_INTERCEPT(closedir);
3053 
3054   TSAN_INTERCEPT(sigsuspend);
3055   TSAN_INTERCEPT(sigblock);
3056   TSAN_INTERCEPT(sigsetmask);
3057   TSAN_INTERCEPT(pthread_sigmask);
3058   TSAN_INTERCEPT(raise);
3059   TSAN_INTERCEPT(kill);
3060   TSAN_INTERCEPT(pthread_kill);
3061   TSAN_INTERCEPT(sleep);
3062   TSAN_INTERCEPT(usleep);
3063   TSAN_INTERCEPT(nanosleep);
3064   TSAN_INTERCEPT(pause);
3065   TSAN_INTERCEPT(gettimeofday);
3066   TSAN_INTERCEPT(getaddrinfo);
3067 
3068   TSAN_INTERCEPT(fork);
3069   TSAN_INTERCEPT(vfork);
3070 #if SANITIZER_LINUX
3071   TSAN_INTERCEPT(clone);
3072 #endif
3073 #if !SANITIZER_ANDROID
3074   TSAN_INTERCEPT(dl_iterate_phdr);
3075 #endif
3076   TSAN_MAYBE_INTERCEPT_ON_EXIT;
3077   TSAN_INTERCEPT(__cxa_atexit);
3078   TSAN_INTERCEPT(_exit);
3079 
3080   TSAN_MAYBE_INTERCEPT__LWP_EXIT;
3081   TSAN_MAYBE_INTERCEPT_THR_EXIT;
3082 
3083 #if !SANITIZER_APPLE && !SANITIZER_ANDROID
3084   // Need to set it up, because interceptors check that the function is resolved.
3085   // But atexit is emitted directly into the module, so it can't be resolved.
3086   REAL(atexit) = (int(*)(void(*)()))unreachable;
3087 #endif
3088 
3089   if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
3090     Printf("ThreadSanitizer: failed to set up atexit callback\n");
3091     Die();
3092   }
3093   if (pthread_atfork(atfork_prepare, atfork_parent, atfork_child)) {
3094     Printf("ThreadSanitizer: failed to set up atfork callbacks\n");
3095     Die();
3096   }
3097 
3098 #if !SANITIZER_APPLE && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
3099   if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
3100     Printf("ThreadSanitizer: failed to create thread key\n");
3101     Die();
3102   }
3103 #endif
3104 
3105   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_init);
3106   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_destroy);
3107   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_signal);
3108   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_broadcast);
3109   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_wait);
3110   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_init);
3111   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_destroy);
3112   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_lock);
3113   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_trylock);
3114   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_unlock);
3115   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_init);
3116   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_destroy);
3117   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_rdlock);
3118   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_tryrdlock);
3119   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_wrlock);
3120   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_trywrlock);
3121   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_unlock);
3122   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(once);
3123   TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(sigmask);
3124 
3125   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init);
3126   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal);
3127   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast);
3128   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_wait);
3129   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_destroy);
3130   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_init);
3131   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_destroy);
3132   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_lock);
3133   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_trylock);
3134   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_unlock);
3135   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_init);
3136   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_destroy);
3137   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_rdlock);
3138   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_tryrdlock);
3139   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_wrlock);
3140   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_trywrlock);
3141   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_unlock);
3142   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(once);
3143   TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(sigsetmask);
3144 
3145   FdInit();
3146 }
3147 
3148 }  // namespace __tsan
3149 
3150 // Invisible barrier for tests.
3151 // There were several unsuccessful iterations of this functionality:
3152 // 1. Initially it was implemented in user code using
3153 //    REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on
3154 //    macOS, and futexes are Linux-specific.
3155 // 2. Then we switched to atomics+usleep(10). But usleep produced parasitic
3156 //    "as-if synchronized via sleep" messages in reports, which failed some
3157 //    output tests.
3158 // 3. Then we switched to atomics+sched_yield. But this produced tons of
3159 //    tsan-visible events, which led to "failed to restore stack trace" failures.
3160 // Note that the no_sanitize_thread attribute does not turn off atomic
3161 // interception, so attaching it to a function defined in user code does not help.
3162 // That's why we ended up with the atomics+FutexWait/FutexWake implementation below.
3163 constexpr u32 kBarrierThreadBits = 10;
3164 constexpr u32 kBarrierThreads = 1 << kBarrierThreadBits;
3165 
3166 extern "C" {
3167 
3168 SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(
3169     atomic_uint32_t *barrier, u32 num_threads) {
3170   if (num_threads >= kBarrierThreads) {
3171     Printf("barrier_init: count is too large (%d)\n", num_threads);
3172     Die();
3173   }
3174   // The low kBarrierThreadBits bits hold the thread count;
3175   // the remaining bits count the threads that have entered.
3176   atomic_store(barrier, num_threads, memory_order_relaxed);
3177 }
3178 
3179 static u32 barrier_epoch(u32 value) {
3180   return (value >> kBarrierThreadBits) / (value & (kBarrierThreads - 1));
3181 }
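// Worked example (editorial illustration, not in the original source): with
// kBarrierThreadBits = 10, __tsan_testonly_barrier_init(b, 4) stores 4. Each
// waiter adds kBarrierThreads (1 << 10 = 1024), so after three waiters the
// value is 4 + 3 * 1024 = 3076 and barrier_epoch(3076) = 3 / 4 = 0. The
// fourth waiter computes barrier_epoch(3076 + 1024) = 4 / 4 = 1, detects the
// epoch change, and wakes all blocked threads via FutexWake.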
3182 
3183 SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
3184     atomic_uint32_t *barrier) {
3185   u32 old = atomic_fetch_add(barrier, kBarrierThreads, memory_order_relaxed);
3186   u32 old_epoch = barrier_epoch(old);
3187   if (barrier_epoch(old + kBarrierThreads) != old_epoch) {
3188     FutexWake(barrier, (1 << 30));
3189     return;
3190   }
3191   for (;;) {
3192     u32 cur = atomic_load(barrier, memory_order_relaxed);
3193     if (barrier_epoch(cur) != old_epoch)
3194       return;
3195     FutexWait(barrier, cur);
3196   }
3197 }
3198 
3199 }  // extern "C"
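// Hypothetical usage sketch (illustrative only, not part of the original source;
// the actual TSan lit tests use their own helper declarations): a test that wants
// two threads to rendezvous could declare the exported entry points and call them
// like this:
//
//   extern "C" void __tsan_testonly_barrier_init(void *barrier, unsigned count);
//   extern "C" void __tsan_testonly_barrier_wait(void *barrier);
//
//   static unsigned barrier_word;  // 32-bit storage for the barrier state
//   // main thread:   __tsan_testonly_barrier_init(&barrier_word, 2);
//   // each worker:   __tsan_testonly_barrier_wait(&barrier_word);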
3200