//===-- tsan_interceptors.cc ----------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// FIXME: move as many interceptors as possible into
// sanitizer_common/sanitizer_common_interceptors.inc
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "interception/interception.h"
#include "tsan_interceptors.h"
#include "tsan_interface.h"
#include "tsan_platform.h"
#include "tsan_suppressions.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_fd.h"


using namespace __tsan;  // NOLINT

#if SANITIZER_FREEBSD || SANITIZER_MAC
#define stdout __stdoutp
#define stderr __stderrp
#endif

#if SANITIZER_NETBSD
#define dirfd(dirp) (*(int *)(dirp))
#define fileno_unlocked(fp)              \
  (((__sanitizer_FILE *)fp)->_file == -1 \
       ? -1                              \
       : (int)(unsigned short)(((__sanitizer_FILE *)fp)->_file))

#define stdout ((__sanitizer_FILE*)&__sF[1])
#define stderr ((__sanitizer_FILE*)&__sF[2])

#define nanosleep __nanosleep50
#define vfork __vfork14
#endif

#if SANITIZER_ANDROID
#define mallopt(a, b)
#endif

#ifdef __mips__
const int kSigCount = 129;
#else
const int kSigCount = 65;
#endif

#ifdef __mips__
struct ucontext_t {
  u64 opaque[768 / sizeof(u64) + 1];
};
#else
struct ucontext_t {
  // The size is determined by looking at sizeof of real ucontext_t on linux.
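  // TSan only copies this buffer around and passes it back to user signal
  // handlers; it never looks inside, hence the opaque representation.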
  u64 opaque[936 / sizeof(u64) + 1];
};
#endif

#if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1
#define PTHREAD_ABI_BASE  "GLIBC_2.3.2"
#elif defined(__aarch64__) || SANITIZER_PPC64V2
#define PTHREAD_ABI_BASE  "GLIBC_2.17"
#endif

extern "C" int pthread_attr_init(void *attr);
extern "C" int pthread_attr_destroy(void *attr);
DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
extern "C" int pthread_setspecific(unsigned key, const void *v);
DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
extern "C" void *pthread_self();
extern "C" void _exit(int status);
#if !SANITIZER_NETBSD
extern "C" int fileno_unlocked(void *stream);
extern "C" int dirfd(void *dirp);
#endif
#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_NETBSD
extern "C" int mallopt(int param, int value);
#endif
#if SANITIZER_NETBSD
extern __sanitizer_FILE __sF[];
#else
extern __sanitizer_FILE *stdout, *stderr;
#endif
#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
const int PTHREAD_MUTEX_RECURSIVE = 1;
const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
#else
const int PTHREAD_MUTEX_RECURSIVE = 2;
const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
#endif
#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
const int EPOLL_CTL_ADD = 1;
#endif
const int SIGILL = 4;
const int SIGABRT = 6;
const int SIGFPE = 8;
const int SIGSEGV = 11;
const int SIGPIPE = 13;
const int SIGTERM = 15;
#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
const int SIGBUS = 10;
const int SIGSYS = 12;
#else
const int SIGBUS = 7;
const int SIGSYS = 31;
#endif
void *const MAP_FAILED = (void*)-1;
#if SANITIZER_NETBSD
const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
#elif !SANITIZER_MAC
const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
#endif
const int MAP_FIXED = 0x10;
typedef long long_t;  // NOLINT

// From /usr/include/unistd.h
# define F_ULOCK 0      /* Unlock a previously locked region.  */
# define F_LOCK  1      /* Lock a region for exclusive use.  */
# define F_TLOCK 2      /* Test and lock a region for exclusive use.  */
# define F_TEST  3      /* Test a region for other processes locks.  */

#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
const int SA_SIGINFO = 0x40;
const int SIG_SETMASK = 3;
#elif defined(__mips__)
const int SA_SIGINFO = 8;
const int SIG_SETMASK = 3;
#else
const int SA_SIGINFO = 4;
const int SIG_SETMASK = 2;
#endif

#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
  (!cur_thread()->is_inited)

namespace __tsan {
struct SignalDesc {
  bool armed;
  bool sigaction;
  __sanitizer_siginfo siginfo;
  ucontext_t ctx;
};

struct ThreadSignalContext {
  int int_signal_send;
  atomic_uintptr_t in_blocking_func;
  atomic_uintptr_t have_pending_signals;
  SignalDesc pending_signals[kSigCount];
  // emptyset and oldset are too big for stack.
  __sanitizer_sigset_t emptyset;
  __sanitizer_sigset_t oldset;
};

// The sole reason tsan wraps atexit callbacks is to establish synchronization
// between callback setup and callback execution.
struct AtExitCtx {
  void (*f)();
  void *arg;
};

// InterceptorContext holds all global data required for interceptors.
// It's explicitly constructed in InitializeInterceptors with placement new
// and is never destroyed. This allows usage of members with non-trivial
// constructors and destructors.
struct InterceptorContext {
  // The object is 64-byte aligned, because we want hot data to be located
  // in a single cache line if possible (it's accessed in every interceptor).
  ALIGNED(64) LibIgnore libignore;
  __sanitizer_sigaction sigactions[kSigCount];
#if !SANITIZER_MAC && !SANITIZER_NETBSD
  unsigned finalize_key;
#endif

  BlockingMutex atexit_mu;
  Vector<struct AtExitCtx *> AtExitStack;

  InterceptorContext()
      : libignore(LINKER_INITIALIZED), AtExitStack() {
  }
};

static ALIGNED(64) char interceptor_placeholder[sizeof(InterceptorContext)];
InterceptorContext *interceptor_ctx() {
  return reinterpret_cast<InterceptorContext*>(&interceptor_placeholder[0]);
}

LibIgnore *libignore() {
  return &interceptor_ctx()->libignore;
}

void InitializeLibIgnore() {
  const SuppressionContext &supp = *Suppressions();
  const uptr n = supp.SuppressionCount();
  for (uptr i = 0; i < n; i++) {
    const Suppression *s = supp.SuppressionAt(i);
    if (0 == internal_strcmp(s->type, kSuppressionLib))
      libignore()->AddIgnoredLibrary(s->templ);
  }
  if (flags()->ignore_noninstrumented_modules)
    libignore()->IgnoreNoninstrumentedModules(true);
  libignore()->OnLibraryLoaded(0);
}

}  // namespace __tsan

static ThreadSignalContext *SigCtx(ThreadState *thr) {
  ThreadSignalContext *ctx = (ThreadSignalContext*)thr->signal_ctx;
  if (ctx == 0 && !thr->is_dead) {
    ctx = (ThreadSignalContext*)MmapOrDie(sizeof(*ctx), "ThreadSignalContext");
    MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
    thr->signal_ctx = ctx;
  }
  return ctx;
}

ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
                                     uptr pc)
    : thr_(thr), pc_(pc), in_ignored_lib_(false), ignoring_(false) {
  Initialize(thr);
  if (!thr_->is_inited) return;
  if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
  DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
  ignoring_ =
      !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
                                libignore()->IsIgnored(pc, &in_ignored_lib_));
  EnableIgnores();
}

ScopedInterceptor::~ScopedInterceptor() {
  if (!thr_->is_inited) return;
  DisableIgnores();
  if (!thr_->ignore_interceptors) {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
    CheckNoLocks(thr_);
  }
}

void ScopedInterceptor::EnableIgnores() {
  if (ignoring_) {
    ThreadIgnoreBegin(thr_, pc_, /*save_stack=*/false);
    if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports++;
    if (in_ignored_lib_) {
      DCHECK(!thr_->in_ignored_lib);
      thr_->in_ignored_lib = true;
    }
  }
}

void ScopedInterceptor::DisableIgnores() {
  if (ignoring_) {
    ThreadIgnoreEnd(thr_, pc_);
    if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports--;
    if (in_ignored_lib_) {
      DCHECK(thr_->in_ignored_lib);
      thr_->in_ignored_lib = false;
    }
  }
}

#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
#if SANITIZER_FREEBSD
# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
#elif SANITIZER_NETBSD
# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
    INTERCEPT_FUNCTION(__libc_##func)
# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
    INTERCEPT_FUNCTION(__libc_thr_##func)
#else
# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
#endif

#define READ_STRING_OF_LEN(thr, pc, s, len, n)    \
  MemoryAccessRange((thr), (pc), (uptr)(s),       \
    common_flags()->strict_string_checks ? (len) + 1 : (n), false)

#define READ_STRING(thr, pc, s, n)                \
    READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))

#define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))

struct BlockingCall {
  explicit BlockingCall(ThreadState *thr)
      : thr(thr)
      , ctx(SigCtx(thr)) {
    for (;;) {
      atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
      if (atomic_load(&ctx->have_pending_signals, memory_order_relaxed) == 0)
        break;
      atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
      ProcessPendingSignals(thr);
    }
    // When we are in a "blocking call", we process signals asynchronously
    // (right when they arrive). In this context we do not expect to be
    // executing any user/runtime code. The known interceptor sequence when
    // this is not true is: pthread_join -> munmap(stack). It's fine
    // to ignore munmap in this case -- we handle stack shadow separately.
    thr->ignore_interceptors++;
  }

  ~BlockingCall() {
    thr->ignore_interceptors--;
    atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
  }

  ThreadState *thr;
  ThreadSignalContext *ctx;
};

TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
  SCOPED_TSAN_INTERCEPTOR(sleep, sec);
  unsigned res = BLOCK_REAL(sleep)(sec);
  AfterSleep(thr, pc);
  return res;
}

TSAN_INTERCEPTOR(int, usleep, long_t usec) {
  SCOPED_TSAN_INTERCEPTOR(usleep, usec);
  int res = BLOCK_REAL(usleep)(usec);
  AfterSleep(thr, pc);
  return res;
}

TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
  SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
  int res = BLOCK_REAL(nanosleep)(req, rem);
  AfterSleep(thr, pc);
  return res;
}

TSAN_INTERCEPTOR(int, pause, int fake) {
  SCOPED_TSAN_INTERCEPTOR(pause, fake);
  return BLOCK_REAL(pause)(fake);
}

static void at_exit_wrapper() {
  AtExitCtx *ctx;
  {
    // Ensure thread-safety.
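    // The same mutex guards pushes in setup_at_exit_wrapper(), so concurrent
    // atexit() registration cannot race with callback execution here.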
    BlockingMutexLock l(&interceptor_ctx()->atexit_mu);

    // Pop AtExitCtx from the top of the stack of callback functions
    uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
    ctx = interceptor_ctx()->AtExitStack[element];
    interceptor_ctx()->AtExitStack.PopBack();
  }

  Acquire(cur_thread(), (uptr)0, (uptr)ctx);
  ((void(*)())ctx->f)();
  InternalFree(ctx);
}

static void cxa_at_exit_wrapper(void *arg) {
  Acquire(cur_thread(), 0, (uptr)arg);
  AtExitCtx *ctx = (AtExitCtx*)arg;
  ((void(*)(void *arg))ctx->f)(ctx->arg);
  InternalFree(ctx);
}

static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
      void *arg, void *dso);

#if !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
  if (UNLIKELY(cur_thread()->in_symbolizer))
    return 0;
  // We want to setup the atexit callback even if we are in ignored lib
  // or after fork.
  SCOPED_INTERCEPTOR_RAW(atexit, f);
  return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0);
}
#endif

TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
  if (UNLIKELY(cur_thread()->in_symbolizer))
    return 0;
  SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
  return setup_at_exit_wrapper(thr, pc, (void(*)())f, arg, dso);
}

static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
      void *arg, void *dso) {
  AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
  ctx->f = f;
  ctx->arg = arg;
  Release(thr, pc, (uptr)ctx);
  // Memory allocation in __cxa_atexit will race with free during exit,
  // because we do not see synchronization around atexit callback list.
  ThreadIgnoreBegin(thr, pc);
  int res;
  if (!dso) {
    // NetBSD does not preserve the 2nd argument if dso is equal to 0
    // Store ctx in a local stack-like structure

    // Ensure thread-safety.
    BlockingMutexLock l(&interceptor_ctx()->atexit_mu);

    res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_wrapper, 0, 0);
    // Push AtExitCtx on the top of the stack of callback functions
    if (!res) {
      interceptor_ctx()->AtExitStack.PushBack(ctx);
    }
  } else {
    res = REAL(__cxa_atexit)(cxa_at_exit_wrapper, ctx, dso);
  }
  ThreadIgnoreEnd(thr, pc);
  return res;
}

#if !SANITIZER_MAC && !SANITIZER_NETBSD
static void on_exit_wrapper(int status, void *arg) {
  ThreadState *thr = cur_thread();
  uptr pc = 0;
  Acquire(thr, pc, (uptr)arg);
  AtExitCtx *ctx = (AtExitCtx*)arg;
  ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
  InternalFree(ctx);
}

TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
  if (UNLIKELY(cur_thread()->in_symbolizer))
    return 0;
  SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
  AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
  ctx->f = (void(*)())f;
  ctx->arg = arg;
  Release(thr, pc, (uptr)ctx);
  // Memory allocation in __cxa_atexit will race with free during exit,
  // because we do not see synchronization around atexit callback list.
  ThreadIgnoreBegin(thr, pc);
  int res = REAL(on_exit)(on_exit_wrapper, ctx);
  ThreadIgnoreEnd(thr, pc);
  return res;
}
#define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
#else
#define TSAN_MAYBE_INTERCEPT_ON_EXIT
#endif

// Cleanup old bufs.
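// A jmp_buf whose sp is at or below the current sp belongs to a frame that
// has already returned, so it can no longer be a longjmp target; drop such
// entries to keep the list short.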
static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
  for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
    JmpBuf *buf = &thr->jmp_bufs[i];
    if (buf->sp <= sp) {
      uptr sz = thr->jmp_bufs.Size();
      internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf));
      thr->jmp_bufs.PopBack();
      i--;
    }
  }
}

static void SetJmp(ThreadState *thr, uptr sp, uptr mangled_sp) {
  if (!thr->is_inited)  // called from libc guts during bootstrap
    return;
  // Cleanup old bufs.
  JmpBufGarbageCollect(thr, sp);
  // Remember the buf.
  JmpBuf *buf = thr->jmp_bufs.PushBack();
  buf->sp = sp;
  buf->mangled_sp = mangled_sp;
  buf->shadow_stack_pos = thr->shadow_stack_pos;
  ThreadSignalContext *sctx = SigCtx(thr);
  buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
  buf->in_blocking_func = sctx ?
      atomic_load(&sctx->in_blocking_func, memory_order_relaxed) :
      false;
  buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
      memory_order_relaxed);
}

static void LongJmp(ThreadState *thr, uptr *env) {
#if SANITIZER_NETBSD
# ifdef __x86_64__
  uptr mangled_sp = env[6];
# else
#  error Unsupported
# endif
#elif defined(__powerpc__)
  uptr mangled_sp = env[0];
#elif SANITIZER_FREEBSD
  uptr mangled_sp = env[2];
#elif SANITIZER_MAC
# ifdef __aarch64__
  uptr mangled_sp =
      (GetMacosVersion() >= MACOS_VERSION_MOJAVE) ? env[12] : env[13];
# else
  uptr mangled_sp = env[2];
# endif
#elif SANITIZER_LINUX
# ifdef __aarch64__
  uptr mangled_sp = env[13];
# elif defined(__mips64)
  uptr mangled_sp = env[1];
# else
  uptr mangled_sp = env[6];
# endif
#endif
  // Find the saved buf by mangled_sp.
  for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
    JmpBuf *buf = &thr->jmp_bufs[i];
    if (buf->mangled_sp == mangled_sp) {
      CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
      // Unwind the stack.
      while (thr->shadow_stack_pos > buf->shadow_stack_pos)
        FuncExit(thr);
      ThreadSignalContext *sctx = SigCtx(thr);
      if (sctx) {
        sctx->int_signal_send = buf->int_signal_send;
        atomic_store(&sctx->in_blocking_func, buf->in_blocking_func,
            memory_order_relaxed);
      }
      atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
          memory_order_relaxed);
      JmpBufGarbageCollect(thr, buf->sp - 1);  // do not collect buf->sp
      return;
    }
  }
  Printf("ThreadSanitizer: can't find longjmp buf\n");
  CHECK(0);
}

// FIXME: put everything below into a common extern "C" block?
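// __tsan_setjmp is called from the setjmp/sigsetjmp assembly trampolines
// (see the tsan_rtl_*.S files) before the real setjmp runs.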
extern "C" void __tsan_setjmp(uptr sp, uptr mangled_sp) {
  SetJmp(cur_thread(), sp, mangled_sp);
}

#if SANITIZER_MAC
TSAN_INTERCEPTOR(int, setjmp, void *env);
TSAN_INTERCEPTOR(int, _setjmp, void *env);
TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
#else  // SANITIZER_MAC

#if SANITIZER_NETBSD
#define setjmp_symname __setjmp14
#define sigsetjmp_symname __sigsetjmp14
#else
#define setjmp_symname setjmp
#define sigsetjmp_symname sigsetjmp
#endif

#define TSAN_INTERCEPTOR_SETJMP_(x) __interceptor_ ## x
#define TSAN_INTERCEPTOR_SETJMP__(x) TSAN_INTERCEPTOR_SETJMP_(x)
#define TSAN_INTERCEPTOR_SETJMP TSAN_INTERCEPTOR_SETJMP__(setjmp_symname)
#define TSAN_INTERCEPTOR_SIGSETJMP TSAN_INTERCEPTOR_SETJMP__(sigsetjmp_symname)

#define TSAN_STRING_SETJMP SANITIZER_STRINGIFY(setjmp_symname)
#define TSAN_STRING_SIGSETJMP SANITIZER_STRINGIFY(sigsetjmp_symname)

// Not called.  Merely to satisfy TSAN_INTERCEPT().
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int TSAN_INTERCEPTOR_SETJMP(void *env);
extern "C" int TSAN_INTERCEPTOR_SETJMP(void *env) {
  CHECK(0);
  return 0;
}

// FIXME: any reason to have a separate declaration?
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int __interceptor__setjmp(void *env);
extern "C" int __interceptor__setjmp(void *env) {
  CHECK(0);
  return 0;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int TSAN_INTERCEPTOR_SIGSETJMP(void *env);
extern "C" int TSAN_INTERCEPTOR_SIGSETJMP(void *env) {
  CHECK(0);
  return 0;
}

#if !SANITIZER_NETBSD
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int __interceptor___sigsetjmp(void *env);
extern "C" int __interceptor___sigsetjmp(void *env) {
  CHECK(0);
  return 0;
}
#endif

extern "C" int setjmp_symname(void *env);
extern "C" int _setjmp(void *env);
extern "C" int sigsetjmp_symname(void *env);
#if !SANITIZER_NETBSD
extern "C" int __sigsetjmp(void *env);
#endif
DEFINE_REAL(int, setjmp_symname, void *env)
DEFINE_REAL(int, _setjmp, void *env)
DEFINE_REAL(int, sigsetjmp_symname, void *env)
#if !SANITIZER_NETBSD
DEFINE_REAL(int, __sigsetjmp, void *env)
#endif
#endif  // SANITIZER_MAC

#if SANITIZER_NETBSD
#define longjmp_symname __longjmp14
#define siglongjmp_symname __siglongjmp14
#else
#define longjmp_symname longjmp
#define siglongjmp_symname siglongjmp
#endif

TSAN_INTERCEPTOR(void, longjmp_symname, uptr *env, int val) {
  // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
  // bad things will happen. We will jump over ScopedInterceptor dtor and can
  // leave thr->in_ignored_lib set.
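  // So the interceptor is only entered in a narrow nested scope below, and
  // LongJmp/REAL(longjmp) run after the ScopedInterceptor has been destroyed.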
635 { 636 SCOPED_INTERCEPTOR_RAW(longjmp_symname, env, val); 637 } 638 LongJmp(cur_thread(), env); 639 REAL(longjmp_symname)(env, val); 640 } 641 642 TSAN_INTERCEPTOR(void, siglongjmp_symname, uptr *env, int val) { 643 { 644 SCOPED_INTERCEPTOR_RAW(siglongjmp_symname, env, val); 645 } 646 LongJmp(cur_thread(), env); 647 REAL(siglongjmp_symname)(env, val); 648 } 649 650 #if SANITIZER_NETBSD 651 TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) { 652 { 653 SCOPED_INTERCEPTOR_RAW(_longjmp, env, val); 654 } 655 LongJmp(cur_thread(), env); 656 REAL(_longjmp)(env, val); 657 } 658 #endif 659 660 #if !SANITIZER_MAC 661 TSAN_INTERCEPTOR(void*, malloc, uptr size) { 662 if (UNLIKELY(cur_thread()->in_symbolizer)) 663 return InternalAlloc(size); 664 void *p = 0; 665 { 666 SCOPED_INTERCEPTOR_RAW(malloc, size); 667 p = user_alloc(thr, pc, size); 668 } 669 invoke_malloc_hook(p, size); 670 return p; 671 } 672 673 TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) { 674 SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz); 675 return user_memalign(thr, pc, align, sz); 676 } 677 678 TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) { 679 if (UNLIKELY(cur_thread()->in_symbolizer)) 680 return InternalCalloc(size, n); 681 void *p = 0; 682 { 683 SCOPED_INTERCEPTOR_RAW(calloc, size, n); 684 p = user_calloc(thr, pc, size, n); 685 } 686 invoke_malloc_hook(p, n * size); 687 return p; 688 } 689 690 TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) { 691 if (UNLIKELY(cur_thread()->in_symbolizer)) 692 return InternalRealloc(p, size); 693 if (p) 694 invoke_free_hook(p); 695 { 696 SCOPED_INTERCEPTOR_RAW(realloc, p, size); 697 p = user_realloc(thr, pc, p, size); 698 } 699 invoke_malloc_hook(p, size); 700 return p; 701 } 702 703 TSAN_INTERCEPTOR(void, free, void *p) { 704 if (p == 0) 705 return; 706 if (UNLIKELY(cur_thread()->in_symbolizer)) 707 return InternalFree(p); 708 invoke_free_hook(p); 709 SCOPED_INTERCEPTOR_RAW(free, p); 710 user_free(thr, pc, p); 711 } 712 713 TSAN_INTERCEPTOR(void, cfree, void *p) { 714 if (p == 0) 715 return; 716 if (UNLIKELY(cur_thread()->in_symbolizer)) 717 return InternalFree(p); 718 invoke_free_hook(p); 719 SCOPED_INTERCEPTOR_RAW(cfree, p); 720 user_free(thr, pc, p); 721 } 722 723 TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) { 724 SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p); 725 return user_alloc_usable_size(p); 726 } 727 #endif 728 729 TSAN_INTERCEPTOR(char*, strcpy, char *dst, const char *src) { // NOLINT 730 SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src); // NOLINT 731 uptr srclen = internal_strlen(src); 732 MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true); 733 MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false); 734 return REAL(strcpy)(dst, src); // NOLINT 735 } 736 737 TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) { 738 SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n); 739 uptr srclen = internal_strnlen(src, n); 740 MemoryAccessRange(thr, pc, (uptr)dst, n, true); 741 MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false); 742 return REAL(strncpy)(dst, src, n); 743 } 744 745 TSAN_INTERCEPTOR(char*, strdup, const char *str) { 746 SCOPED_TSAN_INTERCEPTOR(strdup, str); 747 // strdup will call malloc, so no instrumentation is required here. 
  return REAL(strdup)(str);
}

static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
  if (*addr) {
    if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
      if (flags & MAP_FIXED) {
        errno = errno_EINVAL;
        return false;
      } else {
        *addr = 0;
      }
    }
  }
  return true;
}

template <class Mmap>
static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
                              void *addr, SIZE_T sz, int prot, int flags,
                              int fd, OFF64_T off) {
  if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED;
  void *res = real_mmap(addr, sz, prot, flags, fd, off);
  if (res != MAP_FAILED) {
    if (fd > 0) FdAccess(thr, pc, fd);
    if (thr->ignore_reads_and_writes == 0)
      MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
    else
      MemoryResetRange(thr, pc, (uptr)res, sz);
  }
  return res;
}

TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
  SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
  if (sz != 0) {
    // If sz == 0, munmap will return EINVAL and doesn't unmap any memory.
    DontNeedShadowFor((uptr)addr, sz);
    ScopedGlobalProcessor sgp;
    ctx->metamap.ResetRange(thr->proc(), (uptr)addr, (uptr)sz);
  }
  int res = REAL(munmap)(addr, sz);
  return res;
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
  SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
  return user_memalign(thr, pc, align, sz);
}
#define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
#else
#define TSAN_MAYBE_INTERCEPT_MEMALIGN
#endif

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
  if (UNLIKELY(cur_thread()->in_symbolizer))
    return InternalAlloc(sz, nullptr, align);
  SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
  return user_aligned_alloc(thr, pc, align, sz);
}

TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
  if (UNLIKELY(cur_thread()->in_symbolizer))
    return InternalAlloc(sz, nullptr, GetPageSizeCached());
  SCOPED_INTERCEPTOR_RAW(valloc, sz);
  return user_valloc(thr, pc, sz);
}
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
  if (UNLIKELY(cur_thread()->in_symbolizer)) {
    uptr PageSize = GetPageSizeCached();
    sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
    return InternalAlloc(sz, nullptr, PageSize);
  }
  SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
  return user_pvalloc(thr, pc, sz);
}
#define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
#else
#define TSAN_MAYBE_INTERCEPT_PVALLOC
#endif

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
  if (UNLIKELY(cur_thread()->in_symbolizer)) {
    void *p = InternalAlloc(sz, nullptr, align);
    if (!p)
      return errno_ENOMEM;
    *memptr = p;
    return 0;
  }
  SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
  return user_posix_memalign(thr, pc, memptr, align, sz);
}
#endif

// __cxa_guard_acquire and friends need to be intercepted in a special way -
// regular interceptors will break statically-linked libstdc++. Linux
// interceptors are specially defined as weak functions (so that they don't
// cause link errors when the user defines them as well). So they silently
// auto-disable themselves when such a symbol is already present in the binary.
// If we link libstdc++ statically, it will bring its own __cxa_guard_acquire
// which will silently replace our interceptor. That's why on Linux we simply
// export these interceptors with INTERFACE_ATTRIBUTE.
// On OS X, we don't support static linking, so we just use a regular
// interceptor.
#if SANITIZER_MAC
#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
#else
#define STDCXX_INTERCEPTOR(rettype, name, ...) \
  extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
#endif

// Used in thread-safe function static initialization.
STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
  SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
  for (;;) {
    u32 cmp = atomic_load(g, memory_order_acquire);
    if (cmp == 0) {
      if (atomic_compare_exchange_strong(g, &cmp, 1<<16, memory_order_relaxed))
        return 1;
    } else if (cmp == 1) {
      Acquire(thr, pc, (uptr)g);
      return 0;
    } else {
      internal_sched_yield();
    }
  }
}

STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
  SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
  Release(thr, pc, (uptr)g);
  atomic_store(g, 1, memory_order_release);
}

STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
  SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
  atomic_store(g, 0, memory_order_relaxed);
}

namespace __tsan {
void DestroyThreadState() {
  ThreadState *thr = cur_thread();
  Processor *proc = thr->proc();
  ThreadFinish(thr);
  ProcUnwire(proc, thr);
  ProcDestroy(proc);
  ThreadSignalContext *sctx = thr->signal_ctx;
  if (sctx) {
    thr->signal_ctx = 0;
    UnmapOrDie(sctx, sizeof(*sctx));
  }
  DTLS_Destroy();
  cur_thread_finalize();
}
}  // namespace __tsan

#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
static void thread_finalize(void *v) {
  uptr iter = (uptr)v;
  if (iter > 1) {
    if (pthread_setspecific(interceptor_ctx()->finalize_key,
        (void*)(iter - 1))) {
      Printf("ThreadSanitizer: failed to set thread key\n");
      Die();
    }
    return;
  }
  DestroyThreadState();
}
#endif


struct ThreadParam {
  void* (*callback)(void *arg);
  void *param;
  atomic_uintptr_t tid;
};

extern "C" void *__tsan_thread_start_func(void *arg) {
  ThreadParam *p = (ThreadParam*)arg;
  void* (*callback)(void *arg) = p->callback;
  void *param = p->param;
  int tid = 0;
  {
    ThreadState *thr = cur_thread();
    // Thread-local state is not initialized yet.
    ScopedIgnoreInterceptors ignore;
#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
    ThreadIgnoreBegin(thr, 0);
    if (pthread_setspecific(interceptor_ctx()->finalize_key,
                            (void *)GetPthreadDestructorIterations())) {
      Printf("ThreadSanitizer: failed to set thread key\n");
      Die();
    }
    ThreadIgnoreEnd(thr, 0);
#endif
    while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
      internal_sched_yield();
    Processor *proc = ProcCreate();
    ProcWire(proc, thr);
    ThreadStart(thr, tid, GetTid(), /*workerthread*/ false);
    atomic_store(&p->tid, 0, memory_order_release);
  }
  void *res = callback(param);
  // Prevent the callback from being tail called,
  // it mixes up stack traces.
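  // A volatile local touched after the call is enough to stop the compiler
  // from turning callback(param) into a tail call.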
  volatile int foo = 42;
  foo++;
  return res;
}

TSAN_INTERCEPTOR(int, pthread_create,
    void *th, void *attr, void *(*callback)(void*), void * param) {
  SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);

  MaybeSpawnBackgroundThread();

  if (ctx->after_multithreaded_fork) {
    if (flags()->die_after_fork) {
      Report("ThreadSanitizer: starting new threads after multi-threaded "
          "fork is not supported. Dying (set die_after_fork=0 to override)\n");
      Die();
    } else {
      VPrintf(1, "ThreadSanitizer: starting new threads after multi-threaded "
          "fork is not supported (pid %d). Continuing because of "
          "die_after_fork=0, but you are on your own\n", internal_getpid());
    }
  }
  __sanitizer_pthread_attr_t myattr;
  if (attr == 0) {
    pthread_attr_init(&myattr);
    attr = &myattr;
  }
  int detached = 0;
  REAL(pthread_attr_getdetachstate)(attr, &detached);
  AdjustStackSize(attr);

  ThreadParam p;
  p.callback = callback;
  p.param = param;
  atomic_store(&p.tid, 0, memory_order_relaxed);
  int res = -1;
  {
    // Otherwise we see false positives in pthread stack manipulation.
    ScopedIgnoreInterceptors ignore;
    ThreadIgnoreBegin(thr, pc);
    res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
    ThreadIgnoreEnd(thr, pc);
  }
  if (res == 0) {
    int tid = ThreadCreate(thr, pc, *(uptr*)th, IsStateDetached(detached));
    CHECK_NE(tid, 0);
    // Synchronization on p.tid serves two purposes:
    // 1. ThreadCreate must finish before the new thread starts.
    //    Otherwise the new thread can call pthread_detach, but the pthread_t
    //    identifier is not yet registered in ThreadRegistry by ThreadCreate.
    // 2. ThreadStart must finish before this thread continues.
    //    Otherwise, this thread can call pthread_detach and reset thr->sync
    //    before the new thread got a chance to acquire from it in ThreadStart.
    atomic_store(&p.tid, tid, memory_order_release);
    while (atomic_load(&p.tid, memory_order_acquire) != 0)
      internal_sched_yield();
  }
  if (attr == &myattr)
    pthread_attr_destroy(&myattr);
  return res;
}

TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
  SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
  int tid = ThreadTid(thr, pc, (uptr)th);
  ThreadIgnoreBegin(thr, pc);
  int res = BLOCK_REAL(pthread_join)(th, ret);
  ThreadIgnoreEnd(thr, pc);
  if (res == 0) {
    ThreadJoin(thr, pc, tid);
  }
  return res;
}

DEFINE_REAL_PTHREAD_FUNCTIONS

TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
  SCOPED_TSAN_INTERCEPTOR(pthread_detach, th);
  int tid = ThreadTid(thr, pc, (uptr)th);
  int res = REAL(pthread_detach)(th);
  if (res == 0) {
    ThreadDetach(thr, pc, tid);
  }
  return res;
}

// Problem:
// NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2).
// pthread_cond_t has different size in the different versions.
// If we call new REAL functions for an old pthread_cond_t, they will corrupt
// memory after pthread_cond_t (old cond is smaller).
// If we call old REAL functions for a new pthread_cond_t, we will lose some
// functionality (e.g. old functions do not support waiting against
// CLOCK_REALTIME).
// Proper handling would require having 2 versions of interceptors as well.
// But this is messy, in particular it requires linker scripts when the
// sanitizer runtime is linked into a shared library.
// Instead we assume we don't have dynamic libraries built against the old
// pthread (2.2.5 dates back to 2002). And we provide the legacy_pthread_cond
// flag that allows TSan to work with old libraries (but this mode does not
// support some features, e.g. pthread_condattr_getpshared).
static void *init_cond(void *c, bool force = false) {
  // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
  // So we allocate additional memory on the side large enough to hold
  // any pthread_cond_t object. Always call new REAL functions, but pass
  // the aux object to them.
  // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
  // first word of pthread_cond_t to zero.
  // It's all relevant only for linux.
  if (!common_flags()->legacy_pthread_cond)
    return c;
  atomic_uintptr_t *p = (atomic_uintptr_t*)c;
  uptr cond = atomic_load(p, memory_order_acquire);
  if (!force && cond != 0)
    return (void*)cond;
  void *newcond = WRAP(malloc)(pthread_cond_t_sz);
  internal_memset(newcond, 0, pthread_cond_t_sz);
  if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
      memory_order_acq_rel))
    return newcond;
  WRAP(free)(newcond);
  return (void*)cond;
}

struct CondMutexUnlockCtx {
  ScopedInterceptor *si;
  ThreadState *thr;
  uptr pc;
  void *m;
};

static void cond_mutex_unlock(CondMutexUnlockCtx *arg) {
  // pthread_cond_wait interceptor has enabled async signal delivery
  // (see BlockingCall below). Disable async signals since we are running
  // tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
  // since the thread is cancelled, so we have to manually execute them
  // (the thread still can run some user code due to pthread_cleanup_push).
  ThreadSignalContext *ctx = SigCtx(arg->thr);
  CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
  atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
  MutexPostLock(arg->thr, arg->pc, (uptr)arg->m, MutexFlagDoPreLockOnPostLock);
  // Undo BlockingCall ctor effects.
  arg->thr->ignore_interceptors--;
  arg->si->~ScopedInterceptor();
}

INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
  void *cond = init_cond(c, true);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
  return REAL(pthread_cond_init)(cond, a);
}

static int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si,
                     int (*fn)(void *c, void *m, void *abstime), void *c,
                     void *m, void *t) {
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
  MutexUnlock(thr, pc, (uptr)m);
  CondMutexUnlockCtx arg = {si, thr, pc, m};
  int res = 0;
  // This ensures that we handle mutex lock even in case of pthread_cancel.
  // See test/tsan/cond_cancel.cc.
  {
    // Enable signal delivery while the thread is blocked.
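    // BlockingCall also increments ignore_interceptors; if the wait is
    // cancelled, cond_mutex_unlock() above undoes that manually.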
    BlockingCall bc(thr);
    res = call_pthread_cancel_with_cleanup(
        fn, c, m, t, (void (*)(void *arg))cond_mutex_unlock, &arg);
  }
  if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
  MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
  return res;
}

INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
  return cond_wait(thr, pc, &si, (int (*)(void *c, void *m, void *abstime))REAL(
                                     pthread_cond_wait),
                   cond, m, 0);
}

INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
  return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait), cond, m,
                   abstime);
}

#if SANITIZER_MAC
INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
            void *reltime) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
  return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait_relative_np), cond,
                   m, reltime);
}
#endif

INTERCEPTOR(int, pthread_cond_signal, void *c) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
  return REAL(pthread_cond_signal)(cond);
}

INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
  return REAL(pthread_cond_broadcast)(cond);
}

INTERCEPTOR(int, pthread_cond_destroy, void *c) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
  int res = REAL(pthread_cond_destroy)(cond);
  if (common_flags()->legacy_pthread_cond) {
    // Free our aux cond and zero the pointer to not leave dangling pointers.
    WRAP(free)(cond);
    atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
  int res = REAL(pthread_mutex_init)(m, a);
  if (res == 0) {
    u32 flagz = 0;
    if (a) {
      int type = 0;
      if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
        if (type == PTHREAD_MUTEX_RECURSIVE ||
            type == PTHREAD_MUTEX_RECURSIVE_NP)
          flagz |= MutexFlagWriteReentrant;
    }
    MutexCreate(thr, pc, (uptr)m, flagz);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
  int res = REAL(pthread_mutex_destroy)(m);
  if (res == 0 || res == errno_EBUSY) {
    MutexDestroy(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
  int res = REAL(pthread_mutex_trylock)(m);
  if (res == errno_EOWNERDEAD)
    MutexRepair(thr, pc, (uptr)m);
  if (res == 0 || res == errno_EOWNERDEAD)
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
  int res = REAL(pthread_mutex_timedlock)(m, abstime);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}
#endif

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
  int res = REAL(pthread_spin_init)(m, pshared);
  if (res == 0) {
    MutexCreate(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
  int res = REAL(pthread_spin_destroy)(m);
  if (res == 0) {
    MutexDestroy(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
  MutexPreLock(thr, pc, (uptr)m);
  int res = REAL(pthread_spin_lock)(m);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
  int res = REAL(pthread_spin_trylock)(m);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
  MutexUnlock(thr, pc, (uptr)m);
  int res = REAL(pthread_spin_unlock)(m);
  return res;
}
#endif

TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
  int res = REAL(pthread_rwlock_init)(m, a);
  if (res == 0) {
    MutexCreate(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
  int res = REAL(pthread_rwlock_destroy)(m);
  if (res == 0) {
    MutexDestroy(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
  MutexPreReadLock(thr, pc, (uptr)m);
  int res = REAL(pthread_rwlock_rdlock)(m);
  if (res == 0) {
    MutexPostReadLock(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
  int res = REAL(pthread_rwlock_tryrdlock)(m);
  if (res == 0) {
    MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
  int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
  if (res == 0) {
    MutexPostReadLock(thr, pc, (uptr)m);
  }
  return res;
}
#endif

TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
  MutexPreLock(thr, pc, (uptr)m);
  int res = REAL(pthread_rwlock_wrlock)(m);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
  int res = REAL(pthread_rwlock_trywrlock)(m);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
  int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}
#endif

TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
  MutexReadOrWriteUnlock(thr, pc, (uptr)m);
  int res = REAL(pthread_rwlock_unlock)(m);
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
  SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
  MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
  int res = REAL(pthread_barrier_init)(b, a, count);
  return res;
}

TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
  SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
  MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
  int res = REAL(pthread_barrier_destroy)(b);
  return res;
}

TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
  SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
  Release(thr, pc, (uptr)b);
  MemoryRead(thr, pc, (uptr)b, kSizeLog1);
  int res = REAL(pthread_barrier_wait)(b);
  MemoryRead(thr, pc, (uptr)b, kSizeLog1);
  if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
    Acquire(thr, pc, (uptr)b);
  }
  return res;
}
#endif

TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
  SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
  if (o == 0 || f == 0)
    return errno_EINVAL;
  atomic_uint32_t *a;

  if (SANITIZER_MAC)
    a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
  else if (SANITIZER_NETBSD)
    a = static_cast<atomic_uint32_t*>
          ((void *)((char *)o + __sanitizer::pthread_mutex_t_sz));
  else
    a = static_cast<atomic_uint32_t*>(o);

  u32 v = atomic_load(a, memory_order_acquire);
  if (v == 0 && atomic_compare_exchange_strong(a, &v, 1,
                                               memory_order_relaxed)) {
    (*f)();
    if (!thr->in_ignored_lib)
      Release(thr, pc, (uptr)o);
    atomic_store(a, 2, memory_order_release);
  } else {
    while (v != 2) {
      internal_sched_yield();
      v = atomic_load(a, memory_order_acquire);
    }
    if (!thr->in_ignored_lib)
      Acquire(thr, pc, (uptr)o);
  }
  return 0;
}

#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(__fxstat)(version, fd, buf);
}
#define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
#else
#define TSAN_MAYBE_INTERCEPT___FXSTAT
#endif

TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD
  SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(fstat)(fd, buf);
#else
  SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(__fxstat)(0, fd, buf);
#endif
}

#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(__fxstat64)(version, fd, buf);
}
#define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
#else
#define TSAN_MAYBE_INTERCEPT___FXSTAT64
#endif

#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(__fxstat64)(0, fd, buf);
}
#define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
#else
#define TSAN_MAYBE_INTERCEPT_FSTAT64
#endif

TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) {
  SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode);
  READ_STRING(thr, pc, name, 0);
  int fd = REAL(open)(name, flags, mode);
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) {
  SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode);
  READ_STRING(thr, pc, name, 0);
  int fd = REAL(open64)(name, flags, mode);
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
#else
#define TSAN_MAYBE_INTERCEPT_OPEN64
#endif

TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
  SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
  READ_STRING(thr, pc, name, 0);
  int fd = REAL(creat)(name, mode);
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
  SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
  READ_STRING(thr, pc, name, 0);
  int fd = REAL(creat64)(name, mode);
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
#else
#define TSAN_MAYBE_INTERCEPT_CREAT64
#endif

TSAN_INTERCEPTOR(int, dup, int oldfd) {
  SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
  int newfd = REAL(dup)(oldfd);
  if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
    FdDup(thr, pc, oldfd, newfd, true);
  return newfd;
}

TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
  SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
  int newfd2 = REAL(dup2)(oldfd, newfd);
  if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
    FdDup(thr, pc, oldfd, newfd2, false);
  return newfd2;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
  SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
  int newfd2 = REAL(dup3)(oldfd, newfd, flags);
  if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
    FdDup(thr, pc, oldfd, newfd2, false);
  return newfd2;
}
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
  SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
  int fd = REAL(eventfd)(initval, flags);
  if (fd >= 0)
    FdEventCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
#else
#define TSAN_MAYBE_INTERCEPT_EVENTFD
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
  SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags);
  if (fd >= 0)
    FdClose(thr, pc, fd);
  fd = REAL(signalfd)(fd, mask, flags);
  if (fd >= 0)
    FdSignalCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
#else
#define TSAN_MAYBE_INTERCEPT_SIGNALFD
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, inotify_init, int fake) {
  SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
  int fd = REAL(inotify_init)(fake);
  if (fd >= 0)
    FdInotifyCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
#else
#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
  SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
  int fd = REAL(inotify_init1)(flags);
  if (fd >= 0)
    FdInotifyCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
#else
#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
#endif

TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
  SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
  int fd = REAL(socket)(domain, type, protocol);
  if (fd >= 0)
    FdSocketCreate(thr, pc, fd);
  return fd;
}

TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
  SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
  int res = REAL(socketpair)(domain, type, protocol, fd);
  if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
    FdPipeCreate(thr, pc, fd[0], fd[1]);
  return res;
}

TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
  SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
  FdSocketConnecting(thr, pc, fd);
  int res = REAL(connect)(fd, addr, addrlen);
  if (res == 0 && fd >= 0)
    FdSocketConnect(thr, pc, fd);
  return res;
}

TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
  SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
  int res = REAL(bind)(fd, addr, addrlen);
  if (fd > 0 && res == 0)
    FdAccess(thr, pc, fd);
  return res;
}

TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
  SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
  int res = REAL(listen)(fd, backlog);
  if (fd > 0 && res == 0)
    FdAccess(thr, pc, fd);
  return res;
}

TSAN_INTERCEPTOR(int, close, int fd) {
  SCOPED_TSAN_INTERCEPTOR(close, fd);
  if (fd >= 0)
    FdClose(thr, pc, fd);
  return REAL(close)(fd);
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, __close, int fd) {
  SCOPED_TSAN_INTERCEPTOR(__close, fd);
  if (fd >= 0)
    FdClose(thr, pc, fd);
  return REAL(__close)(fd);
}
#define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
#else
#define TSAN_MAYBE_INTERCEPT___CLOSE
#endif

// glibc guts
#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
  SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
  int fds[64];
  int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
  for (int i = 0; i < cnt; i++) {
    if (fds[i] > 0)
      FdClose(thr, pc, fds[i]);
  }
  REAL(__res_iclose)(state, free_addr);
}
#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
#else
#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
#endif

TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
  SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
  int res = REAL(pipe)(pipefd);
  if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
    FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
  SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
  int res = REAL(pipe2)(pipefd, flags);
  if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
    FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
  return res;
}
#endif

TSAN_INTERCEPTOR(int, unlink, char *path) {
  SCOPED_TSAN_INTERCEPTOR(unlink, path);
  Release(thr, pc, File2addr(path));
  int res = REAL(unlink)(path);
  return res;
}

TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
  SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
  void *res = REAL(tmpfile)(fake);
  if (res) {
    int fd = fileno_unlocked(res);
    if (fd >= 0)
      FdFileCreate(thr, pc, fd);
  }
  return res;
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
  SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
  void *res = REAL(tmpfile64)(fake);
  if (res) {
    int fd = fileno_unlocked(res);
    if (fd >= 0)
      FdFileCreate(thr, pc, fd);
  }
  return res;
}
#define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
#else
#define TSAN_MAYBE_INTERCEPT_TMPFILE64
#endif

static void FlushStreams() {
  // Flushing all the streams here may freeze the process if a child thread is
  // performing file stream operations at the same time.
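  // So only flush stdout and stderr, not every open stream.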
1731 REAL(fflush)(stdout); 1732 REAL(fflush)(stderr); 1733 } 1734 1735 TSAN_INTERCEPTOR(void, abort, int fake) { 1736 SCOPED_TSAN_INTERCEPTOR(abort, fake); 1737 FlushStreams(); 1738 REAL(abort)(fake); 1739 } 1740 1741 TSAN_INTERCEPTOR(int, rmdir, char *path) { 1742 SCOPED_TSAN_INTERCEPTOR(rmdir, path); 1743 Release(thr, pc, Dir2addr(path)); 1744 int res = REAL(rmdir)(path); 1745 return res; 1746 } 1747 1748 TSAN_INTERCEPTOR(int, closedir, void *dirp) { 1749 SCOPED_TSAN_INTERCEPTOR(closedir, dirp); 1750 if (dirp) { 1751 int fd = dirfd(dirp); 1752 FdClose(thr, pc, fd); 1753 } 1754 return REAL(closedir)(dirp); 1755 } 1756 1757 #if SANITIZER_LINUX 1758 TSAN_INTERCEPTOR(int, epoll_create, int size) { 1759 SCOPED_TSAN_INTERCEPTOR(epoll_create, size); 1760 int fd = REAL(epoll_create)(size); 1761 if (fd >= 0) 1762 FdPollCreate(thr, pc, fd); 1763 return fd; 1764 } 1765 1766 TSAN_INTERCEPTOR(int, epoll_create1, int flags) { 1767 SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags); 1768 int fd = REAL(epoll_create1)(flags); 1769 if (fd >= 0) 1770 FdPollCreate(thr, pc, fd); 1771 return fd; 1772 } 1773 1774 TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) { 1775 SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev); 1776 if (epfd >= 0) 1777 FdAccess(thr, pc, epfd); 1778 if (epfd >= 0 && fd >= 0) 1779 FdAccess(thr, pc, fd); 1780 if (op == EPOLL_CTL_ADD && epfd >= 0) 1781 FdRelease(thr, pc, epfd); 1782 int res = REAL(epoll_ctl)(epfd, op, fd, ev); 1783 return res; 1784 } 1785 1786 TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) { 1787 SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout); 1788 if (epfd >= 0) 1789 FdAccess(thr, pc, epfd); 1790 int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout); 1791 if (res > 0 && epfd >= 0) 1792 FdAcquire(thr, pc, epfd); 1793 return res; 1794 } 1795 1796 TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout, 1797 void *sigmask) { 1798 SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask); 1799 if (epfd >= 0) 1800 FdAccess(thr, pc, epfd); 1801 int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask); 1802 if (res > 0 && epfd >= 0) 1803 FdAcquire(thr, pc, epfd); 1804 return res; 1805 } 1806 1807 #define TSAN_MAYBE_INTERCEPT_EPOLL \ 1808 TSAN_INTERCEPT(epoll_create); \ 1809 TSAN_INTERCEPT(epoll_create1); \ 1810 TSAN_INTERCEPT(epoll_ctl); \ 1811 TSAN_INTERCEPT(epoll_wait); \ 1812 TSAN_INTERCEPT(epoll_pwait) 1813 #else 1814 #define TSAN_MAYBE_INTERCEPT_EPOLL 1815 #endif 1816 1817 // The following functions are intercepted merely to process pending signals. 1818 // If program blocks signal X, we must deliver the signal before the function 1819 // returns. Similarly, if program unblocks a signal (or returns from sigsuspend) 1820 // it's better to deliver the signal straight away. 
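// The wrappers below therefore do no work of their own: SCOPED_TSAN_INTERCEPTOR
// opens an interceptor scope, and pending signals are expected to be delivered
// by ProcessPendingSignals() (defined further down) when that scope is left
// after the real function returns.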
1821 TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
1822   SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
1823   return REAL(sigsuspend)(mask);
1824 }
1825 
1826 TSAN_INTERCEPTOR(int, sigblock, int mask) {
1827   SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
1828   return REAL(sigblock)(mask);
1829 }
1830 
1831 TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
1832   SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
1833   return REAL(sigsetmask)(mask);
1834 }
1835 
1836 TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
1837     __sanitizer_sigset_t *oldset) {
1838   SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
1839   return REAL(pthread_sigmask)(how, set, oldset);
1840 }
1841 
1842 namespace __tsan {
1843 
1844 static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
1845                                   bool sigact, int sig,
1846                                   __sanitizer_siginfo *info, void *uctx) {
1847   __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
1848   if (acquire)
1849     Acquire(thr, 0, (uptr)&sigactions[sig]);
1850   // Signals are generally asynchronous, so if we receive a signal when
1851   // ignores are enabled we should disable ignores. This is critical for sync
1852   // and interceptors, because otherwise we can miss synchronization and report
1853   // false races.
1854   int ignore_reads_and_writes = thr->ignore_reads_and_writes;
1855   int ignore_interceptors = thr->ignore_interceptors;
1856   int ignore_sync = thr->ignore_sync;
1857   if (!ctx->after_multithreaded_fork) {
1858     thr->ignore_reads_and_writes = 0;
1859     thr->fast_state.ClearIgnoreBit();
1860     thr->ignore_interceptors = 0;
1861     thr->ignore_sync = 0;
1862   }
1863   // Ensure that the handler does not spoil errno.
1864   const int saved_errno = errno;
1865   errno = 99;
1866   // This code races with sigaction. Be careful to not read sa_sigaction twice.
1867   // Also need to remember pc for reporting before the call,
1868   // because the handler can reset it.
1869   volatile uptr pc =
1870       sigact ? (uptr)sigactions[sig].sigaction : (uptr)sigactions[sig].handler;
1871   if (pc != sig_dfl && pc != sig_ign) {
1872     if (sigact)
1873       ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
1874     else
1875       ((__sanitizer_sighandler_ptr)pc)(sig);
1876   }
1877   if (!ctx->after_multithreaded_fork) {
1878     thr->ignore_reads_and_writes = ignore_reads_and_writes;
1879     if (ignore_reads_and_writes)
1880       thr->fast_state.SetIgnoreBit();
1881     thr->ignore_interceptors = ignore_interceptors;
1882     thr->ignore_sync = ignore_sync;
1883   }
1884   // We do not detect errno spoiling for SIGTERM,
1885   // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
1886   // and tsan would report a false positive in such cases.
1887   // It's difficult to properly detect this situation (reraise),
1888   // because in the async signal processing case (when the handler is called
1889   // directly from rtl_generic_sighandler) we have not yet received the reraised
1890   // signal; and it looks too fragile to intercept all ways to reraise a signal.
1891   if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) {
1892     VarSizeStackTrace stack;
1893     // StackTrace::GetNextInstructionPc(pc) is used because a return address
1894     // is expected; OutputReport() will undo this.
1895 ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack); 1896 ThreadRegistryLock l(ctx->thread_registry); 1897 ScopedReport rep(ReportTypeErrnoInSignal); 1898 if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) { 1899 rep.AddStack(stack, true); 1900 OutputReport(thr, rep); 1901 } 1902 } 1903 errno = saved_errno; 1904 } 1905 1906 void ProcessPendingSignals(ThreadState *thr) { 1907 ThreadSignalContext *sctx = SigCtx(thr); 1908 if (sctx == 0 || 1909 atomic_load(&sctx->have_pending_signals, memory_order_relaxed) == 0) 1910 return; 1911 atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed); 1912 atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed); 1913 internal_sigfillset(&sctx->emptyset); 1914 int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset); 1915 CHECK_EQ(res, 0); 1916 for (int sig = 0; sig < kSigCount; sig++) { 1917 SignalDesc *signal = &sctx->pending_signals[sig]; 1918 if (signal->armed) { 1919 signal->armed = false; 1920 CallUserSignalHandler(thr, false, true, signal->sigaction, sig, 1921 &signal->siginfo, &signal->ctx); 1922 } 1923 } 1924 res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0); 1925 CHECK_EQ(res, 0); 1926 atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed); 1927 } 1928 1929 } // namespace __tsan 1930 1931 static bool is_sync_signal(ThreadSignalContext *sctx, int sig) { 1932 return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || 1933 sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS || 1934 // If we are sending signal to ourselves, we must process it now. 1935 (sctx && sig == sctx->int_signal_send); 1936 } 1937 1938 void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig, 1939 __sanitizer_siginfo *info, 1940 void *ctx) { 1941 ThreadState *thr = cur_thread(); 1942 ThreadSignalContext *sctx = SigCtx(thr); 1943 if (sig < 0 || sig >= kSigCount) { 1944 VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig); 1945 return; 1946 } 1947 // Don't mess with synchronous signals. 1948 const bool sync = is_sync_signal(sctx, sig); 1949 if (sync || 1950 // If we are in blocking function, we can safely process it now 1951 // (but check if we are in a recursive interceptor, 1952 // i.e. pthread_join()->munmap()). 1953 (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) { 1954 atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed); 1955 if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) { 1956 atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed); 1957 CallUserSignalHandler(thr, sync, true, sigact, sig, info, ctx); 1958 atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed); 1959 } else { 1960 // Be very conservative with when we do acquire in this case. 1961 // It's unsafe to do acquire in async handlers, because ThreadState 1962 // can be in inconsistent state. 1963 // SIGSYS looks relatively safe -- it's synchronous and can actually 1964 // need some global state. 
1965 bool acq = (sig == SIGSYS); 1966 CallUserSignalHandler(thr, sync, acq, sigact, sig, info, ctx); 1967 } 1968 atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed); 1969 return; 1970 } 1971 1972 if (sctx == 0) 1973 return; 1974 SignalDesc *signal = &sctx->pending_signals[sig]; 1975 if (signal->armed == false) { 1976 signal->armed = true; 1977 signal->sigaction = sigact; 1978 if (info) 1979 internal_memcpy(&signal->siginfo, info, sizeof(*info)); 1980 if (ctx) 1981 internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx)); 1982 atomic_store(&sctx->have_pending_signals, 1, memory_order_relaxed); 1983 } 1984 } 1985 1986 static void rtl_sighandler(int sig) { 1987 rtl_generic_sighandler(false, sig, 0, 0); 1988 } 1989 1990 static void rtl_sigaction(int sig, __sanitizer_siginfo *info, void *ctx) { 1991 rtl_generic_sighandler(true, sig, info, ctx); 1992 } 1993 1994 TSAN_INTERCEPTOR(int, raise, int sig) { 1995 SCOPED_TSAN_INTERCEPTOR(raise, sig); 1996 ThreadSignalContext *sctx = SigCtx(thr); 1997 CHECK_NE(sctx, 0); 1998 int prev = sctx->int_signal_send; 1999 sctx->int_signal_send = sig; 2000 int res = REAL(raise)(sig); 2001 CHECK_EQ(sctx->int_signal_send, sig); 2002 sctx->int_signal_send = prev; 2003 return res; 2004 } 2005 2006 TSAN_INTERCEPTOR(int, kill, int pid, int sig) { 2007 SCOPED_TSAN_INTERCEPTOR(kill, pid, sig); 2008 ThreadSignalContext *sctx = SigCtx(thr); 2009 CHECK_NE(sctx, 0); 2010 int prev = sctx->int_signal_send; 2011 if (pid == (int)internal_getpid()) { 2012 sctx->int_signal_send = sig; 2013 } 2014 int res = REAL(kill)(pid, sig); 2015 if (pid == (int)internal_getpid()) { 2016 CHECK_EQ(sctx->int_signal_send, sig); 2017 sctx->int_signal_send = prev; 2018 } 2019 return res; 2020 } 2021 2022 TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) { 2023 SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig); 2024 ThreadSignalContext *sctx = SigCtx(thr); 2025 CHECK_NE(sctx, 0); 2026 int prev = sctx->int_signal_send; 2027 if (tid == pthread_self()) { 2028 sctx->int_signal_send = sig; 2029 } 2030 int res = REAL(pthread_kill)(tid, sig); 2031 if (tid == pthread_self()) { 2032 CHECK_EQ(sctx->int_signal_send, sig); 2033 sctx->int_signal_send = prev; 2034 } 2035 return res; 2036 } 2037 2038 TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) { 2039 SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz); 2040 // It's intercepted merely to process pending signals. 2041 return REAL(gettimeofday)(tv, tz); 2042 } 2043 2044 TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service, 2045 void *hints, void *rv) { 2046 SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv); 2047 // We miss atomic synchronization in getaddrinfo, 2048 // and can report false race between malloc and free 2049 // inside of getaddrinfo. So ignore memory accesses. 2050 ThreadIgnoreBegin(thr, pc); 2051 int res = REAL(getaddrinfo)(node, service, hints, rv); 2052 ThreadIgnoreEnd(thr, pc); 2053 return res; 2054 } 2055 2056 TSAN_INTERCEPTOR(int, fork, int fake) { 2057 if (UNLIKELY(cur_thread()->in_symbolizer)) 2058 return REAL(fork)(fake); 2059 SCOPED_INTERCEPTOR_RAW(fork, fake); 2060 ForkBefore(thr, pc); 2061 int pid; 2062 { 2063 // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and 2064 // we'll assert in CheckNoLocks() unless we ignore interceptors. 
2065     ScopedIgnoreInterceptors ignore;
2066     pid = REAL(fork)(fake);
2067   }
2068   if (pid == 0) {
2069     // child
2070     ForkChildAfter(thr, pc);
2071     FdOnFork(thr, pc);
2072   } else if (pid > 0) {
2073     // parent
2074     ForkParentAfter(thr, pc);
2075   } else {
2076     // error
2077     ForkParentAfter(thr, pc);
2078   }
2079   return pid;
2080 }
2081 
2082 TSAN_INTERCEPTOR(int, vfork, int fake) {
2083   // Some programs (e.g. openjdk) call close for all file descriptors
2084   // in the child process. Under tsan it leads to false positives, because
2085   // the address space is shared, so the parent process also thinks that
2086   // the descriptors are closed (while they are actually not).
2087   // This leads to false positives due to missed synchronization.
2088   // Strictly speaking, this is undefined behavior, because the vfork child is
2089   // not allowed to call any functions other than exec/exit. But this is what
2090   // openjdk does, so we want to handle it.
2091   // We could disable interceptors in the child process. But it's not possible
2092   // to simply intercept and wrap vfork, because the vfork child is not allowed
2093   // to return from the function that calls vfork, and that's exactly what
2094   // we would do. So this would require some assembly trickery as well.
2095   // Instead we simply turn vfork into fork.
2096   return WRAP(fork)(fake);
2097 }
2098 
2099 #if !SANITIZER_MAC && !SANITIZER_ANDROID
2100 typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
2101                                     void *data);
2102 struct dl_iterate_phdr_data {
2103   ThreadState *thr;
2104   uptr pc;
2105   dl_iterate_phdr_cb_t cb;
2106   void *data;
2107 };
2108 
2109 static bool IsAppNotRodata(uptr addr) {
2110   return IsAppMem(addr) && *(u64*)MemToShadow(addr) != kShadowRodata;
2111 }
2112 
2113 static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
2114                               void *data) {
2115   dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
2116   // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is later
2117   // accessible in the dl_iterate_phdr callback. But we don't see synchronization
2118   // inside of the dynamic linker, so we "unpoison" it here in order to not
2119   // produce false reports. Ignoring malloc/free in dlopen/dlclose is not enough
2120   // because some libc functions call __libc_dlopen.
2121   if (info && IsAppNotRodata((uptr)info->dlpi_name))
2122     MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
2123                      internal_strlen(info->dlpi_name));
2124   int res = cbdata->cb(info, size, cbdata->data);
2125   // Perform the check one more time in case info->dlpi_name was overwritten
2126   // by the user callback.
2127 if (info && IsAppNotRodata((uptr)info->dlpi_name)) 2128 MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name, 2129 internal_strlen(info->dlpi_name)); 2130 return res; 2131 } 2132 2133 TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) { 2134 SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data); 2135 dl_iterate_phdr_data cbdata; 2136 cbdata.thr = thr; 2137 cbdata.pc = pc; 2138 cbdata.cb = cb; 2139 cbdata.data = data; 2140 int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata); 2141 return res; 2142 } 2143 #endif 2144 2145 static int OnExit(ThreadState *thr) { 2146 int status = Finalize(thr); 2147 FlushStreams(); 2148 return status; 2149 } 2150 2151 struct TsanInterceptorContext { 2152 ThreadState *thr; 2153 const uptr caller_pc; 2154 const uptr pc; 2155 }; 2156 2157 #if !SANITIZER_MAC 2158 static void HandleRecvmsg(ThreadState *thr, uptr pc, 2159 __sanitizer_msghdr *msg) { 2160 int fds[64]; 2161 int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds)); 2162 for (int i = 0; i < cnt; i++) 2163 FdEventCreate(thr, pc, fds[i]); 2164 } 2165 #endif 2166 2167 #include "sanitizer_common/sanitizer_platform_interceptors.h" 2168 // Causes interceptor recursion (getaddrinfo() and fopen()) 2169 #undef SANITIZER_INTERCEPT_GETADDRINFO 2170 // There interceptors do not seem to be strictly necessary for tsan. 2171 // But we see cases where the interceptors consume 70% of execution time. 2172 // Memory blocks passed to fgetgrent_r are "written to" by tsan several times. 2173 // First, there is some recursion (getgrnam_r calls fgetgrent_r), and each 2174 // function "writes to" the buffer. Then, the same memory is "written to" 2175 // twice, first as buf and then as pwbufp (both of them refer to the same 2176 // addresses). 2177 #undef SANITIZER_INTERCEPT_GETPWENT 2178 #undef SANITIZER_INTERCEPT_GETPWENT_R 2179 #undef SANITIZER_INTERCEPT_FGETPWENT 2180 #undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS 2181 #undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS 2182 // We define our own. 2183 #if SANITIZER_INTERCEPT_TLS_GET_ADDR 2184 #define NEED_TLS_GET_ADDR 2185 #endif 2186 #undef SANITIZER_INTERCEPT_TLS_GET_ADDR 2187 #undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK 2188 2189 #define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name) 2190 #define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \ 2191 INTERCEPT_FUNCTION_VER(name, ver) 2192 2193 #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \ 2194 MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \ 2195 ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \ 2196 true) 2197 2198 #define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \ 2199 MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr, \ 2200 ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \ 2201 false) 2202 2203 #define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \ 2204 SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__); \ 2205 TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \ 2206 ctx = (void *)&_ctx; \ 2207 (void) ctx; 2208 2209 #define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) 
\ 2210 SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \ 2211 TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \ 2212 ctx = (void *)&_ctx; \ 2213 (void) ctx; 2214 2215 #define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \ 2216 Acquire(thr, pc, File2addr(path)); \ 2217 if (file) { \ 2218 int fd = fileno_unlocked(file); \ 2219 if (fd >= 0) FdFileCreate(thr, pc, fd); \ 2220 } 2221 2222 #define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \ 2223 if (file) { \ 2224 int fd = fileno_unlocked(file); \ 2225 if (fd >= 0) FdClose(thr, pc, fd); \ 2226 } 2227 2228 #define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \ 2229 libignore()->OnLibraryLoaded(filename) 2230 2231 #define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \ 2232 libignore()->OnLibraryUnloaded() 2233 2234 #define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \ 2235 Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u) 2236 2237 #define COMMON_INTERCEPTOR_RELEASE(ctx, u) \ 2238 Release(((TsanInterceptorContext *) ctx)->thr, pc, u) 2239 2240 #define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \ 2241 Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path)) 2242 2243 #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \ 2244 FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd) 2245 2246 #define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \ 2247 FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd) 2248 2249 #define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \ 2250 FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd) 2251 2252 #define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \ 2253 FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd) 2254 2255 #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \ 2256 ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name) 2257 2258 #define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \ 2259 __tsan::ctx->thread_registry->SetThreadNameByUserId(thread, name) 2260 2261 #define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name) 2262 2263 #define COMMON_INTERCEPTOR_ON_EXIT(ctx) \ 2264 OnExit(((TsanInterceptorContext *) ctx)->thr) 2265 2266 #define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) \ 2267 MutexPreLock(((TsanInterceptorContext *)ctx)->thr, \ 2268 ((TsanInterceptorContext *)ctx)->pc, (uptr)m) 2269 2270 #define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) \ 2271 MutexPostLock(((TsanInterceptorContext *)ctx)->thr, \ 2272 ((TsanInterceptorContext *)ctx)->pc, (uptr)m) 2273 2274 #define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \ 2275 MutexUnlock(((TsanInterceptorContext *)ctx)->thr, \ 2276 ((TsanInterceptorContext *)ctx)->pc, (uptr)m) 2277 2278 #define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) \ 2279 MutexRepair(((TsanInterceptorContext *)ctx)->thr, \ 2280 ((TsanInterceptorContext *)ctx)->pc, (uptr)m) 2281 2282 #define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) \ 2283 MutexInvalidAccess(((TsanInterceptorContext *)ctx)->thr, \ 2284 ((TsanInterceptorContext *)ctx)->pc, (uptr)m) 2285 2286 #define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \ 2287 off) \ 2288 do { \ 2289 return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd, \ 2290 off); \ 2291 } while (false) 2292 2293 #if !SANITIZER_MAC 2294 #define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \ 2295 HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \ 2296 ((TsanInterceptorContext *)ctx)->pc, msg) 2297 #endif 2298 2299 #define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \ 2300 if (TsanThread *t = GetCurrentThread()) { \ 2301 *begin = t->tls_begin(); \ 2302 *end = t->tls_end(); \ 2303 } else { \ 
2304     *begin = *end = 0;                                    \
2305   }
2306 
2307 #define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
2308   SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()
2309 
2310 #define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
2311   SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()
2312 
2313 #include "sanitizer_common/sanitizer_common_interceptors.inc"
2314 
2315 static int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2316                           __sanitizer_sigaction *old);
2317 static __sanitizer_sighandler_ptr signal_impl(int sig,
2318                                               __sanitizer_sighandler_ptr h);
2319 
2320 #define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signo, act, oldact) \
2321   { return sigaction_impl(signo, act, oldact); }
2322 
2323 #define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
2324   { return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); }
2325 
2326 #include "sanitizer_common/sanitizer_signal_interceptors.inc"
2327 
2328 int sigaction_impl(int sig, const __sanitizer_sigaction *act,
2329                    __sanitizer_sigaction *old) {
2330   // Note: if we call REAL(sigaction) directly for any reason without proxying
2331   // the signal handler through rtl_sigaction, very bad things will happen.
2332   // The handler will run synchronously and corrupt tsan per-thread state.
2333   SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
2334   __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
2335   __sanitizer_sigaction old_stored;
2336   if (old) internal_memcpy(&old_stored, &sigactions[sig], sizeof(old_stored));
2337   __sanitizer_sigaction newact;
2338   if (act) {
2339     // Copy act into sigactions[sig].
2340     // Can't use struct copy, because the compiler can emit a call to memcpy.
2341     // Can't use internal_memcpy, because it copies byte-by-byte,
2342     // and the signal handler reads the handler concurrently. So it can read
2343     // some bytes from the old value and some bytes from the new value.
2344     // Use volatile to prevent insertion of memcpy.
2345 sigactions[sig].handler = 2346 *(volatile __sanitizer_sighandler_ptr const *)&act->handler; 2347 sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags; 2348 internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask, 2349 sizeof(sigactions[sig].sa_mask)); 2350 #if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD 2351 sigactions[sig].sa_restorer = act->sa_restorer; 2352 #endif 2353 internal_memcpy(&newact, act, sizeof(newact)); 2354 internal_sigfillset(&newact.sa_mask); 2355 if ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl) { 2356 if (newact.sa_flags & SA_SIGINFO) 2357 newact.sigaction = rtl_sigaction; 2358 else 2359 newact.handler = rtl_sighandler; 2360 } 2361 ReleaseStore(thr, pc, (uptr)&sigactions[sig]); 2362 act = &newact; 2363 } 2364 int res = REAL(sigaction)(sig, act, old); 2365 if (res == 0 && old) { 2366 uptr cb = (uptr)old->sigaction; 2367 if (cb == (uptr)rtl_sigaction || cb == (uptr)rtl_sighandler) { 2368 internal_memcpy(old, &old_stored, sizeof(*old)); 2369 } 2370 } 2371 return res; 2372 } 2373 2374 static __sanitizer_sighandler_ptr signal_impl(int sig, 2375 __sanitizer_sighandler_ptr h) { 2376 __sanitizer_sigaction act; 2377 act.handler = h; 2378 internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask)); 2379 act.sa_flags = 0; 2380 __sanitizer_sigaction old; 2381 int res = sigaction_symname(sig, &act, &old); 2382 if (res) return (__sanitizer_sighandler_ptr)sig_err; 2383 return old.handler; 2384 } 2385 2386 #define TSAN_SYSCALL() \ 2387 ThreadState *thr = cur_thread(); \ 2388 if (thr->ignore_interceptors) \ 2389 return; \ 2390 ScopedSyscall scoped_syscall(thr) \ 2391 /**/ 2392 2393 struct ScopedSyscall { 2394 ThreadState *thr; 2395 2396 explicit ScopedSyscall(ThreadState *thr) 2397 : thr(thr) { 2398 Initialize(thr); 2399 } 2400 2401 ~ScopedSyscall() { 2402 ProcessPendingSignals(thr); 2403 } 2404 }; 2405 2406 #if !SANITIZER_FREEBSD && !SANITIZER_MAC 2407 static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) { 2408 TSAN_SYSCALL(); 2409 MemoryAccessRange(thr, pc, p, s, write); 2410 } 2411 2412 static void syscall_acquire(uptr pc, uptr addr) { 2413 TSAN_SYSCALL(); 2414 Acquire(thr, pc, addr); 2415 DPrintf("syscall_acquire(%p)\n", addr); 2416 } 2417 2418 static void syscall_release(uptr pc, uptr addr) { 2419 TSAN_SYSCALL(); 2420 DPrintf("syscall_release(%p)\n", addr); 2421 Release(thr, pc, addr); 2422 } 2423 2424 static void syscall_fd_close(uptr pc, int fd) { 2425 TSAN_SYSCALL(); 2426 FdClose(thr, pc, fd); 2427 } 2428 2429 static USED void syscall_fd_acquire(uptr pc, int fd) { 2430 TSAN_SYSCALL(); 2431 FdAcquire(thr, pc, fd); 2432 DPrintf("syscall_fd_acquire(%p)\n", fd); 2433 } 2434 2435 static USED void syscall_fd_release(uptr pc, int fd) { 2436 TSAN_SYSCALL(); 2437 DPrintf("syscall_fd_release(%p)\n", fd); 2438 FdRelease(thr, pc, fd); 2439 } 2440 2441 static void syscall_pre_fork(uptr pc) { 2442 TSAN_SYSCALL(); 2443 ForkBefore(thr, pc); 2444 } 2445 2446 static void syscall_post_fork(uptr pc, int pid) { 2447 TSAN_SYSCALL(); 2448 if (pid == 0) { 2449 // child 2450 ForkChildAfter(thr, pc); 2451 FdOnFork(thr, pc); 2452 } else if (pid > 0) { 2453 // parent 2454 ForkParentAfter(thr, pc); 2455 } else { 2456 // error 2457 ForkParentAfter(thr, pc); 2458 } 2459 } 2460 #endif 2461 2462 #define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \ 2463 syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false) 2464 2465 #define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \ 2466 syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true) 2467 2468 
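// The post-read/write hooks below are intentionally empty: the accesses are
// already modeled by the pre hooks above, so the bodies only need to silence
// unused-parameter warnings.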
#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \ 2469 do { \ 2470 (void)(p); \ 2471 (void)(s); \ 2472 } while (false) 2473 2474 #define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \ 2475 do { \ 2476 (void)(p); \ 2477 (void)(s); \ 2478 } while (false) 2479 2480 #define COMMON_SYSCALL_ACQUIRE(addr) \ 2481 syscall_acquire(GET_CALLER_PC(), (uptr)(addr)) 2482 2483 #define COMMON_SYSCALL_RELEASE(addr) \ 2484 syscall_release(GET_CALLER_PC(), (uptr)(addr)) 2485 2486 #define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd) 2487 2488 #define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd) 2489 2490 #define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd) 2491 2492 #define COMMON_SYSCALL_PRE_FORK() \ 2493 syscall_pre_fork(GET_CALLER_PC()) 2494 2495 #define COMMON_SYSCALL_POST_FORK(res) \ 2496 syscall_post_fork(GET_CALLER_PC(), res) 2497 2498 #include "sanitizer_common/sanitizer_common_syscalls.inc" 2499 #include "sanitizer_common/sanitizer_syscalls_netbsd.inc" 2500 2501 #ifdef NEED_TLS_GET_ADDR 2502 // Define own interceptor instead of sanitizer_common's for three reasons: 2503 // 1. It must not process pending signals. 2504 // Signal handlers may contain MOVDQA instruction (see below). 2505 // 2. It must be as simple as possible to not contain MOVDQA. 2506 // 3. Sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE which 2507 // is empty for tsan (meant only for msan). 2508 // Note: __tls_get_addr can be called with mis-aligned stack due to: 2509 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066 2510 // So the interceptor must work with mis-aligned stack, in particular, does not 2511 // execute MOVDQA with stack addresses. 2512 TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) { 2513 void *res = REAL(__tls_get_addr)(arg); 2514 ThreadState *thr = cur_thread(); 2515 if (!thr) 2516 return res; 2517 DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr, 2518 thr->tls_addr + thr->tls_size); 2519 if (!dtv) 2520 return res; 2521 // New DTLS block has been allocated. 
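  // Reset the shadow for the new range so that stale shadow values left over
  // from a previous mapping at these addresses do not produce false reports.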
2522 MemoryResetRange(thr, 0, dtv->beg, dtv->size); 2523 return res; 2524 } 2525 #endif 2526 2527 #if SANITIZER_NETBSD 2528 TSAN_INTERCEPTOR(void, _lwp_exit) { 2529 SCOPED_TSAN_INTERCEPTOR(_lwp_exit); 2530 DestroyThreadState(); 2531 REAL(_lwp_exit)(); 2532 } 2533 #define TSAN_MAYBE_INTERCEPT__LWP_EXIT TSAN_INTERCEPT(_lwp_exit) 2534 #else 2535 #define TSAN_MAYBE_INTERCEPT__LWP_EXIT 2536 #endif 2537 2538 #if SANITIZER_FREEBSD 2539 TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) { 2540 SCOPED_TSAN_INTERCEPTOR(thr_exit, state); 2541 DestroyThreadState(); 2542 REAL(thr_exit(state)); 2543 } 2544 #define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit) 2545 #else 2546 #define TSAN_MAYBE_INTERCEPT_THR_EXIT 2547 #endif 2548 2549 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a) 2550 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c) 2551 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c) 2552 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m) 2553 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c) 2554 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a) 2555 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m) 2556 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m) 2557 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a) 2558 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m) 2559 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m) 2560 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m) 2561 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m) 2562 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m) 2563 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m) 2564 TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)()) 2565 2566 namespace __tsan { 2567 2568 static void finalize(void *arg) { 2569 ThreadState *thr = cur_thread(); 2570 int status = Finalize(thr); 2571 // Make sure the output is not lost. 2572 FlushStreams(); 2573 if (status) 2574 Die(); 2575 } 2576 2577 #if !SANITIZER_MAC && !SANITIZER_ANDROID 2578 static void unreachable() { 2579 Report("FATAL: ThreadSanitizer: unreachable called\n"); 2580 Die(); 2581 } 2582 #endif 2583 2584 void InitializeInterceptors() { 2585 #if !SANITIZER_MAC 2586 // We need to setup it early, because functions like dlsym() can call it. 2587 REAL(memset) = internal_memset; 2588 REAL(memcpy) = internal_memcpy; 2589 #endif 2590 2591 // Instruct libc malloc to consume less memory. 2592 #if SANITIZER_LINUX 2593 mallopt(1, 0); // M_MXFAST 2594 mallopt(-3, 32*1024); // M_MMAP_THRESHOLD 2595 #endif 2596 2597 new(interceptor_ctx()) InterceptorContext(); 2598 2599 InitializeCommonInterceptors(); 2600 InitializeSignalInterceptors(); 2601 2602 #if !SANITIZER_MAC 2603 // We can not use TSAN_INTERCEPT to get setjmp addr, 2604 // because it does &setjmp and setjmp is not present in some versions of libc. 
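  // Instead, resolve the symbols by name via GetRealFunctionAddress;
  // TSAN_STRING_SETJMP and TSAN_STRING_SIGSETJMP are expected to expand to the
  // platform-specific symbol names.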
2605 using __interception::GetRealFunctionAddress; 2606 GetRealFunctionAddress(TSAN_STRING_SETJMP, 2607 (uptr*)&REAL(setjmp_symname), 0, 0); 2608 GetRealFunctionAddress("_setjmp", (uptr*)&REAL(_setjmp), 0, 0); 2609 GetRealFunctionAddress(TSAN_STRING_SIGSETJMP, 2610 (uptr*)&REAL(sigsetjmp_symname), 0, 0); 2611 #if !SANITIZER_NETBSD 2612 GetRealFunctionAddress("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0); 2613 #endif 2614 #endif 2615 2616 TSAN_INTERCEPT(longjmp_symname); 2617 TSAN_INTERCEPT(siglongjmp_symname); 2618 #if SANITIZER_NETBSD 2619 TSAN_INTERCEPT(_longjmp); 2620 #endif 2621 2622 TSAN_INTERCEPT(malloc); 2623 TSAN_INTERCEPT(__libc_memalign); 2624 TSAN_INTERCEPT(calloc); 2625 TSAN_INTERCEPT(realloc); 2626 TSAN_INTERCEPT(free); 2627 TSAN_INTERCEPT(cfree); 2628 TSAN_INTERCEPT(munmap); 2629 TSAN_MAYBE_INTERCEPT_MEMALIGN; 2630 TSAN_INTERCEPT(valloc); 2631 TSAN_MAYBE_INTERCEPT_PVALLOC; 2632 TSAN_INTERCEPT(posix_memalign); 2633 2634 TSAN_INTERCEPT(strcpy); // NOLINT 2635 TSAN_INTERCEPT(strncpy); 2636 TSAN_INTERCEPT(strdup); 2637 2638 TSAN_INTERCEPT(pthread_create); 2639 TSAN_INTERCEPT(pthread_join); 2640 TSAN_INTERCEPT(pthread_detach); 2641 2642 TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE); 2643 TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE); 2644 TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE); 2645 TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE); 2646 TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE); 2647 TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE); 2648 2649 TSAN_INTERCEPT(pthread_mutex_init); 2650 TSAN_INTERCEPT(pthread_mutex_destroy); 2651 TSAN_INTERCEPT(pthread_mutex_trylock); 2652 TSAN_INTERCEPT(pthread_mutex_timedlock); 2653 2654 TSAN_INTERCEPT(pthread_spin_init); 2655 TSAN_INTERCEPT(pthread_spin_destroy); 2656 TSAN_INTERCEPT(pthread_spin_lock); 2657 TSAN_INTERCEPT(pthread_spin_trylock); 2658 TSAN_INTERCEPT(pthread_spin_unlock); 2659 2660 TSAN_INTERCEPT(pthread_rwlock_init); 2661 TSAN_INTERCEPT(pthread_rwlock_destroy); 2662 TSAN_INTERCEPT(pthread_rwlock_rdlock); 2663 TSAN_INTERCEPT(pthread_rwlock_tryrdlock); 2664 TSAN_INTERCEPT(pthread_rwlock_timedrdlock); 2665 TSAN_INTERCEPT(pthread_rwlock_wrlock); 2666 TSAN_INTERCEPT(pthread_rwlock_trywrlock); 2667 TSAN_INTERCEPT(pthread_rwlock_timedwrlock); 2668 TSAN_INTERCEPT(pthread_rwlock_unlock); 2669 2670 TSAN_INTERCEPT(pthread_barrier_init); 2671 TSAN_INTERCEPT(pthread_barrier_destroy); 2672 TSAN_INTERCEPT(pthread_barrier_wait); 2673 2674 TSAN_INTERCEPT(pthread_once); 2675 2676 TSAN_INTERCEPT(fstat); 2677 TSAN_MAYBE_INTERCEPT___FXSTAT; 2678 TSAN_MAYBE_INTERCEPT_FSTAT64; 2679 TSAN_MAYBE_INTERCEPT___FXSTAT64; 2680 TSAN_INTERCEPT(open); 2681 TSAN_MAYBE_INTERCEPT_OPEN64; 2682 TSAN_INTERCEPT(creat); 2683 TSAN_MAYBE_INTERCEPT_CREAT64; 2684 TSAN_INTERCEPT(dup); 2685 TSAN_INTERCEPT(dup2); 2686 TSAN_INTERCEPT(dup3); 2687 TSAN_MAYBE_INTERCEPT_EVENTFD; 2688 TSAN_MAYBE_INTERCEPT_SIGNALFD; 2689 TSAN_MAYBE_INTERCEPT_INOTIFY_INIT; 2690 TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1; 2691 TSAN_INTERCEPT(socket); 2692 TSAN_INTERCEPT(socketpair); 2693 TSAN_INTERCEPT(connect); 2694 TSAN_INTERCEPT(bind); 2695 TSAN_INTERCEPT(listen); 2696 TSAN_MAYBE_INTERCEPT_EPOLL; 2697 TSAN_INTERCEPT(close); 2698 TSAN_MAYBE_INTERCEPT___CLOSE; 2699 TSAN_MAYBE_INTERCEPT___RES_ICLOSE; 2700 TSAN_INTERCEPT(pipe); 2701 TSAN_INTERCEPT(pipe2); 2702 2703 TSAN_INTERCEPT(unlink); 2704 TSAN_INTERCEPT(tmpfile); 2705 TSAN_MAYBE_INTERCEPT_TMPFILE64; 2706 TSAN_INTERCEPT(abort); 2707 TSAN_INTERCEPT(rmdir); 2708 
TSAN_INTERCEPT(closedir); 2709 2710 TSAN_INTERCEPT(sigsuspend); 2711 TSAN_INTERCEPT(sigblock); 2712 TSAN_INTERCEPT(sigsetmask); 2713 TSAN_INTERCEPT(pthread_sigmask); 2714 TSAN_INTERCEPT(raise); 2715 TSAN_INTERCEPT(kill); 2716 TSAN_INTERCEPT(pthread_kill); 2717 TSAN_INTERCEPT(sleep); 2718 TSAN_INTERCEPT(usleep); 2719 TSAN_INTERCEPT(nanosleep); 2720 TSAN_INTERCEPT(pause); 2721 TSAN_INTERCEPT(gettimeofday); 2722 TSAN_INTERCEPT(getaddrinfo); 2723 2724 TSAN_INTERCEPT(fork); 2725 TSAN_INTERCEPT(vfork); 2726 #if !SANITIZER_ANDROID 2727 TSAN_INTERCEPT(dl_iterate_phdr); 2728 #endif 2729 TSAN_MAYBE_INTERCEPT_ON_EXIT; 2730 TSAN_INTERCEPT(__cxa_atexit); 2731 TSAN_INTERCEPT(_exit); 2732 2733 #ifdef NEED_TLS_GET_ADDR 2734 TSAN_INTERCEPT(__tls_get_addr); 2735 #endif 2736 2737 TSAN_MAYBE_INTERCEPT__LWP_EXIT; 2738 TSAN_MAYBE_INTERCEPT_THR_EXIT; 2739 2740 #if !SANITIZER_MAC && !SANITIZER_ANDROID 2741 // Need to setup it, because interceptors check that the function is resolved. 2742 // But atexit is emitted directly into the module, so can't be resolved. 2743 REAL(atexit) = (int(*)(void(*)()))unreachable; 2744 #endif 2745 2746 if (REAL(__cxa_atexit)(&finalize, 0, 0)) { 2747 Printf("ThreadSanitizer: failed to setup atexit callback\n"); 2748 Die(); 2749 } 2750 2751 #if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD 2752 if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) { 2753 Printf("ThreadSanitizer: failed to create thread key\n"); 2754 Die(); 2755 } 2756 #endif 2757 2758 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init); 2759 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal); 2760 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast); 2761 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_wait); 2762 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_destroy); 2763 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_init); 2764 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_destroy); 2765 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_trylock); 2766 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_init); 2767 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_destroy); 2768 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_rdlock); 2769 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_tryrdlock); 2770 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_wrlock); 2771 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_trywrlock); 2772 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_unlock); 2773 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(once); 2774 2775 FdInit(); 2776 } 2777 2778 } // namespace __tsan 2779 2780 // Invisible barrier for tests. 2781 // There were several unsuccessful iterations for this functionality: 2782 // 1. Initially it was implemented in user code using 2783 // REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on 2784 // MacOS. Futexes are linux-specific for this matter. 2785 // 2. Then we switched to atomics+usleep(10). But usleep produced parasitic 2786 // "as-if synchronized via sleep" messages in reports which failed some 2787 // output tests. 2788 // 3. Then we switched to atomics+sched_yield. But this produced tons of tsan- 2789 // visible events, which lead to "failed to restore stack trace" failures. 2790 // Note that no_sanitize_thread attribute does not turn off atomic interception 2791 // so attaching it to the function defined in user code does not help. 2792 // That's why we now have what we have. 
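// The 64-bit barrier word stores the thread count in its 8 least significant
// bits and the number of threads that have entered in the remaining bits.
// For example, with count == 4 the word starts at 0x004; after the fourth
// waiter does fetch_add(1 << 8) the word becomes 0x404, so
// epoch = (0x404 >> 8) / 4 == 1, which differs from every waiter's old epoch
// of 0 and releases them all. (Illustrative arithmetic; see the two functions
// below.)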
2793 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
2794 void __tsan_testonly_barrier_init(u64 *barrier, u32 count) {
2795   if (count >= (1 << 8)) {
2796     Printf("barrier_init: count is too large (%d)\n", count);
2797     Die();
2798   }
2799   // The 8 lsb hold the thread count; the remaining bits count entered threads.
2800   *barrier = count;
2801 }
2802 
2803 extern "C" SANITIZER_INTERFACE_ATTRIBUTE
2804 void __tsan_testonly_barrier_wait(u64 *barrier) {
2805   unsigned old = __atomic_fetch_add(barrier, 1 << 8, __ATOMIC_RELAXED);
2806   unsigned old_epoch = (old >> 8) / (old & 0xff);
2807   for (;;) {
2808     unsigned cur = __atomic_load_n(barrier, __ATOMIC_RELAXED);
2809     unsigned cur_epoch = (cur >> 8) / (cur & 0xff);
2810     if (cur_epoch != old_epoch)
2811       return;
2812     internal_sched_yield();
2813   }
2814 }
2815 