//===-- tsan_interceptors_posix.cpp ---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// FIXME: move as many interceptors as possible into
// sanitizer_common/sanitizer_common_interceptors.inc
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "interception/interception.h"
#include "tsan_interceptors.h"
#include "tsan_interface.h"
#include "tsan_platform.h"
#include "tsan_suppressions.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_fd.h"

#include <stdarg.h>

using namespace __tsan;

#if SANITIZER_FREEBSD || SANITIZER_MAC
#define stdout __stdoutp
#define stderr __stderrp
#endif

#if SANITIZER_NETBSD
#define dirfd(dirp) (*(int *)(dirp))
#define fileno_unlocked(fp)              \
  (((__sanitizer_FILE *)fp)->_file == -1 \
       ? -1                              \
       : (int)(unsigned short)(((__sanitizer_FILE *)fp)->_file))

#define stdout ((__sanitizer_FILE*)&__sF[1])
#define stderr ((__sanitizer_FILE*)&__sF[2])

#define nanosleep __nanosleep50
#define vfork __vfork14
#endif

#ifdef __mips__
const int kSigCount = 129;
#else
const int kSigCount = 65;
#endif

#ifdef __mips__
struct ucontext_t {
  u64 opaque[768 / sizeof(u64) + 1];
};
#else
struct ucontext_t {
  // The size is determined by looking at sizeof of real ucontext_t on linux.
  u64 opaque[936 / sizeof(u64) + 1];
};
#endif

#if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \
    defined(__s390x__)
#define PTHREAD_ABI_BASE "GLIBC_2.3.2"
#elif defined(__aarch64__) || SANITIZER_PPC64V2
#define PTHREAD_ABI_BASE "GLIBC_2.17"
#endif

extern "C" int pthread_attr_init(void *attr);
extern "C" int pthread_attr_destroy(void *attr);
DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
extern "C" int pthread_atfork(void (*prepare)(void), void (*parent)(void),
                              void (*child)(void));
extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
extern "C" int pthread_setspecific(unsigned key, const void *v);
DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
extern "C" int pthread_equal(void *t1, void *t2);
extern "C" void *pthread_self();
extern "C" void _exit(int status);
#if !SANITIZER_NETBSD
extern "C" int fileno_unlocked(void *stream);
extern "C" int dirfd(void *dirp);
#endif
#if SANITIZER_NETBSD
extern __sanitizer_FILE __sF[];
#else
extern __sanitizer_FILE *stdout, *stderr;
#endif
#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
const int PTHREAD_MUTEX_RECURSIVE = 1;
const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
#else
const int PTHREAD_MUTEX_RECURSIVE = 2;
const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
#endif
#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
const int EPOLL_CTL_ADD = 1;
#endif
const int SIGILL = 4;
const int SIGTRAP = 5;
const int SIGABRT = 6;
const int SIGFPE = 8;
const int SIGSEGV = 11;
const int SIGPIPE = 13;
const int SIGTERM = 15;
#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
const int SIGBUS = 10;
const int SIGSYS = 12;
#else
const int SIGBUS = 7;
const int SIGSYS = 31;
#endif
void *const MAP_FAILED = (void*)-1;
#if SANITIZER_NETBSD
const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
#elif !SANITIZER_MAC
const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
#endif
const int MAP_FIXED = 0x10;
typedef long long_t;
typedef __sanitizer::u16 mode_t;

// From /usr/include/unistd.h
# define F_ULOCK 0  /* Unlock a previously locked region.  */
# define F_LOCK  1  /* Lock a region for exclusive use.  */
# define F_TLOCK 2  /* Test and lock a region for exclusive use.  */
# define F_TEST  3  /* Test a region for other processes locks.  */

#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
const int SA_SIGINFO = 0x40;
const int SIG_SETMASK = 3;
#elif defined(__mips__)
const int SA_SIGINFO = 8;
const int SIG_SETMASK = 3;
#else
const int SA_SIGINFO = 4;
const int SIG_SETMASK = 2;
#endif

#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
  (!cur_thread_init()->is_inited)

namespace __tsan {
struct SignalDesc {
  bool armed;
  __sanitizer_siginfo siginfo;
  ucontext_t ctx;
};

struct ThreadSignalContext {
  int int_signal_send;
  atomic_uintptr_t in_blocking_func;
  SignalDesc pending_signals[kSigCount];
  // emptyset and oldset are too big for stack.
  __sanitizer_sigset_t emptyset;
  __sanitizer_sigset_t oldset;
};

// The sole reason tsan wraps atexit callbacks is to establish synchronization
// between callback setup and callback execution.
struct AtExitCtx {
  void (*f)();
  void *arg;
  uptr pc;
};

// InterceptorContext holds all global data required for interceptors.
// It's explicitly constructed in InitializeInterceptors with placement new
// and is never destroyed. This allows usage of members with non-trivial
// constructors and destructors.
struct InterceptorContext {
  // The object is 64-byte aligned, because we want hot data to be located
  // in a single cache line if possible (it's accessed in every interceptor).
  ALIGNED(64) LibIgnore libignore;
  __sanitizer_sigaction sigactions[kSigCount];
#if !SANITIZER_MAC && !SANITIZER_NETBSD
  unsigned finalize_key;
#endif

  Mutex atexit_mu;
  Vector<struct AtExitCtx *> AtExitStack;

  InterceptorContext()
      : libignore(LINKER_INITIALIZED),
        atexit_mu(MutexTypeAtExit),
        AtExitStack() {}
};

static ALIGNED(64) char interceptor_placeholder[sizeof(InterceptorContext)];
InterceptorContext *interceptor_ctx() {
  return reinterpret_cast<InterceptorContext*>(&interceptor_placeholder[0]);
}

LibIgnore *libignore() {
  return &interceptor_ctx()->libignore;
}

void InitializeLibIgnore() {
  const SuppressionContext &supp = *Suppressions();
  const uptr n = supp.SuppressionCount();
  for (uptr i = 0; i < n; i++) {
    const Suppression *s = supp.SuppressionAt(i);
    if (0 == internal_strcmp(s->type, kSuppressionLib))
      libignore()->AddIgnoredLibrary(s->templ);
  }
  if (flags()->ignore_noninstrumented_modules)
    libignore()->IgnoreNoninstrumentedModules(true);
  libignore()->OnLibraryLoaded(0);
}
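
// Note (editorial): kSuppressionLib presumably corresponds to the
// "called_from_lib" suppression type (see tsan_suppressions.h); each matching
// suppression registers a library name pattern with LibIgnore so that memory
// accesses performed from inside that library are ignored. The final
// OnLibraryLoaded(0) call rescans the modules that are already mapped at
// initialization time so they are picked up as well.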

// The following two hooks can be used for cooperative scheduling when
// locking.
#ifdef TSAN_EXTERNAL_HOOKS
void OnPotentiallyBlockingRegionBegin();
void OnPotentiallyBlockingRegionEnd();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionBegin() {}
SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionEnd() {}
#endif

}  // namespace __tsan

static ThreadSignalContext *SigCtx(ThreadState *thr) {
  ThreadSignalContext *ctx = (ThreadSignalContext*)thr->signal_ctx;
  if (ctx == 0 && !thr->is_dead) {
    ctx = (ThreadSignalContext*)MmapOrDie(sizeof(*ctx), "ThreadSignalContext");
    MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
    thr->signal_ctx = ctx;
  }
  return ctx;
}

ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
                                     uptr pc)
    : thr_(thr), in_ignored_lib_(false), ignoring_(false) {
  LazyInitialize(thr);
  if (!thr_->is_inited) return;
  if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
  DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
  ignoring_ =
      !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
                                libignore()->IsIgnored(pc, &in_ignored_lib_));
  EnableIgnores();
}

ScopedInterceptor::~ScopedInterceptor() {
  if (!thr_->is_inited) return;
  DisableIgnores();
  if (!thr_->ignore_interceptors) {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
    CheckedMutex::CheckNoLocks();
  }
}

NOINLINE
void ScopedInterceptor::EnableIgnoresImpl() {
  ThreadIgnoreBegin(thr_, 0);
  if (flags()->ignore_noninstrumented_modules)
    thr_->suppress_reports++;
  if (in_ignored_lib_) {
    DCHECK(!thr_->in_ignored_lib);
    thr_->in_ignored_lib = true;
  }
}

NOINLINE
void ScopedInterceptor::DisableIgnoresImpl() {
  ThreadIgnoreEnd(thr_);
  if (flags()->ignore_noninstrumented_modules)
    thr_->suppress_reports--;
  if (in_ignored_lib_) {
    DCHECK(thr_->in_ignored_lib);
    thr_->in_ignored_lib = false;
  }
}
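
// Rough, illustrative sketch of the interceptor boilerplate used throughout
// this file. The SCOPED_* macros live in tsan_interceptors.h; approximately,
// SCOPED_INTERCEPTOR_RAW(func, ...) expands to something like:
//
//   ThreadState *thr = cur_thread_init();
//   ScopedInterceptor si(thr, #func, GET_CALLER_PC());
//   uptr pc = GET_CURRENT_PC();
//
// and SCOPED_TSAN_INTERCEPTOR additionally bails out to REAL(func) when the
// interceptor must be ignored. ScopedInterceptor (above) then takes care of
// FuncEntry/FuncExit, ignore bookkeeping and pending-signal delivery on exit.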

#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
#if SANITIZER_FREEBSD || SANITIZER_NETBSD
# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
#else
# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
#endif
#if SANITIZER_FREEBSD
# define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func) \
    INTERCEPT_FUNCTION(_pthread_##func)
#else
# define TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(func)
#endif
#if SANITIZER_NETBSD
# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
    INTERCEPT_FUNCTION(__libc_##func)
# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
    INTERCEPT_FUNCTION(__libc_thr_##func)
#else
# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
#endif

#define READ_STRING_OF_LEN(thr, pc, s, len, n)    \
  MemoryAccessRange((thr), (pc), (uptr)(s),       \
    common_flags()->strict_string_checks ? (len) + 1 : (n), false)

#define READ_STRING(thr, pc, s, n)                \
    READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))

#define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))

struct BlockingCall {
  explicit BlockingCall(ThreadState *thr)
      : thr(thr)
      , ctx(SigCtx(thr)) {
    for (;;) {
      atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
      if (atomic_load(&thr->pending_signals, memory_order_relaxed) == 0)
        break;
      atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
      ProcessPendingSignals(thr);
    }
    // When we are in a "blocking call", we process signals asynchronously
    // (right when they arrive). In this context we do not expect to be
    // executing any user/runtime code. The known interceptor sequence when
    // this is not true is: pthread_join -> munmap(stack). It's fine
    // to ignore munmap in this case -- we handle stack shadow separately.
    thr->ignore_interceptors++;
  }

  ~BlockingCall() {
    thr->ignore_interceptors--;
    atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
  }

  ThreadState *thr;
  ThreadSignalContext *ctx;
};

TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
  SCOPED_TSAN_INTERCEPTOR(sleep, sec);
  unsigned res = BLOCK_REAL(sleep)(sec);
  AfterSleep(thr, pc);
  return res;
}

TSAN_INTERCEPTOR(int, usleep, long_t usec) {
  SCOPED_TSAN_INTERCEPTOR(usleep, usec);
  int res = BLOCK_REAL(usleep)(usec);
  AfterSleep(thr, pc);
  return res;
}

TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
  SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
  int res = BLOCK_REAL(nanosleep)(req, rem);
  AfterSleep(thr, pc);
  return res;
}

TSAN_INTERCEPTOR(int, pause, int fake) {
  SCOPED_TSAN_INTERCEPTOR(pause, fake);
  return BLOCK_REAL(pause)(fake);
}

// Note: the callback wrappers are deliberately given the strange
// "installed_at" name because in reports they will then appear between the
// callback frames and the frame that installed the callback.
static void at_exit_callback_installed_at() {
  AtExitCtx *ctx;
  {
    // Ensure thread-safety.
    Lock l(&interceptor_ctx()->atexit_mu);

    // Pop AtExitCtx from the top of the stack of callback functions
    uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
    ctx = interceptor_ctx()->AtExitStack[element];
    interceptor_ctx()->AtExitStack.PopBack();
  }

  ThreadState *thr = cur_thread();
  Acquire(thr, ctx->pc, (uptr)ctx);
  FuncEntry(thr, ctx->pc);
  ((void(*)())ctx->f)();
  FuncExit(thr);
  Free(ctx);
}

static void cxa_at_exit_callback_installed_at(void *arg) {
  ThreadState *thr = cur_thread();
  AtExitCtx *ctx = (AtExitCtx*)arg;
  Acquire(thr, ctx->pc, (uptr)arg);
  FuncEntry(thr, ctx->pc);
  ((void(*)(void *arg))ctx->f)(ctx->arg);
  FuncExit(thr);
  Free(ctx);
}

static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
                                 void *arg, void *dso);

#if !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
  if (in_symbolizer())
    return 0;
  // We want to setup the atexit callback even if we are in ignored lib
  // or after fork.
  SCOPED_INTERCEPTOR_RAW(atexit, f);
  return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, 0, 0);
}
#endif

TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
  if (in_symbolizer())
    return 0;
  SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
  return setup_at_exit_wrapper(thr, GET_CALLER_PC(), (void (*)())f, arg, dso);
}

static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
                                 void *arg, void *dso) {
  auto *ctx = New<AtExitCtx>();
  ctx->f = f;
  ctx->arg = arg;
  ctx->pc = pc;
  Release(thr, pc, (uptr)ctx);
  // Memory allocation in __cxa_atexit will race with free during exit,
  // because we do not see synchronization around atexit callback list.
  ThreadIgnoreBegin(thr, pc);
  int res;
  if (!dso) {
    // NetBSD does not preserve the 2nd argument if dso is equal to 0
    // Store ctx in a local stack-like structure

    // Ensure thread-safety.
    Lock l(&interceptor_ctx()->atexit_mu);
    // __cxa_atexit calls calloc. If we don't ignore interceptors, we will fail
    // due to atexit_mu held on exit from the calloc interceptor.
    ScopedIgnoreInterceptors ignore;

    res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_callback_installed_at,
                             0, 0);
    // Push AtExitCtx on the top of the stack of callback functions
    if (!res) {
      interceptor_ctx()->AtExitStack.PushBack(ctx);
    }
  } else {
    res = REAL(__cxa_atexit)(cxa_at_exit_callback_installed_at, ctx, dso);
  }
  ThreadIgnoreEnd(thr);
  return res;
}

#if !SANITIZER_MAC && !SANITIZER_NETBSD
static void on_exit_callback_installed_at(int status, void *arg) {
  ThreadState *thr = cur_thread();
  AtExitCtx *ctx = (AtExitCtx*)arg;
  Acquire(thr, ctx->pc, (uptr)arg);
  FuncEntry(thr, ctx->pc);
  ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
  FuncExit(thr);
  Free(ctx);
}

TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
  if (in_symbolizer())
    return 0;
  SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
  auto *ctx = New<AtExitCtx>();
  ctx->f = (void(*)())f;
  ctx->arg = arg;
  ctx->pc = GET_CALLER_PC();
  Release(thr, pc, (uptr)ctx);
  // Memory allocation in __cxa_atexit will race with free during exit,
  // because we do not see synchronization around atexit callback list.
  ThreadIgnoreBegin(thr, pc);
  int res = REAL(on_exit)(on_exit_callback_installed_at, ctx);
  ThreadIgnoreEnd(thr);
  return res;
}
#define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
#else
#define TSAN_MAYBE_INTERCEPT_ON_EXIT
#endif

// Cleanup old bufs.
static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
  for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
    JmpBuf *buf = &thr->jmp_bufs[i];
    if (buf->sp <= sp) {
      uptr sz = thr->jmp_bufs.Size();
      internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf));
      thr->jmp_bufs.PopBack();
      i--;
    }
  }
}

static void SetJmp(ThreadState *thr, uptr sp) {
  if (!thr->is_inited)  // called from libc guts during bootstrap
    return;
  // Cleanup old bufs.
  JmpBufGarbageCollect(thr, sp);
  // Remember the buf.
  JmpBuf *buf = thr->jmp_bufs.PushBack();
  buf->sp = sp;
  buf->shadow_stack_pos = thr->shadow_stack_pos;
  ThreadSignalContext *sctx = SigCtx(thr);
  buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
  buf->in_blocking_func = sctx ?
      atomic_load(&sctx->in_blocking_func, memory_order_relaxed) :
      false;
  buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
      memory_order_relaxed);
}

static void LongJmp(ThreadState *thr, uptr *env) {
  uptr sp = ExtractLongJmpSp(env);
  // Find the saved buf with matching sp.
  for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
    JmpBuf *buf = &thr->jmp_bufs[i];
    if (buf->sp == sp) {
      CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
      // Unwind the stack.
      while (thr->shadow_stack_pos > buf->shadow_stack_pos)
        FuncExit(thr);
      ThreadSignalContext *sctx = SigCtx(thr);
      if (sctx) {
        sctx->int_signal_send = buf->int_signal_send;
        atomic_store(&sctx->in_blocking_func, buf->in_blocking_func,
            memory_order_relaxed);
      }
      atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
          memory_order_relaxed);
      JmpBufGarbageCollect(thr, buf->sp - 1);  // do not collect buf->sp
      return;
    }
  }
  Printf("ThreadSanitizer: can't find longjmp buf\n");
  CHECK(0);
}

// FIXME: put everything below into a common extern "C" block?
extern "C" void __tsan_setjmp(uptr sp) { SetJmp(cur_thread_init(), sp); }

#if SANITIZER_MAC
TSAN_INTERCEPTOR(int, setjmp, void *env);
TSAN_INTERCEPTOR(int, _setjmp, void *env);
TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
#else  // SANITIZER_MAC

#if SANITIZER_NETBSD
#define setjmp_symname __setjmp14
#define sigsetjmp_symname __sigsetjmp14
#else
#define setjmp_symname setjmp
#define sigsetjmp_symname sigsetjmp
#endif

#define TSAN_INTERCEPTOR_SETJMP_(x) __interceptor_ ## x
#define TSAN_INTERCEPTOR_SETJMP__(x) TSAN_INTERCEPTOR_SETJMP_(x)
#define TSAN_INTERCEPTOR_SETJMP TSAN_INTERCEPTOR_SETJMP__(setjmp_symname)
#define TSAN_INTERCEPTOR_SIGSETJMP TSAN_INTERCEPTOR_SETJMP__(sigsetjmp_symname)

#define TSAN_STRING_SETJMP SANITIZER_STRINGIFY(setjmp_symname)
#define TSAN_STRING_SIGSETJMP SANITIZER_STRINGIFY(sigsetjmp_symname)

// Not called. Merely to satisfy TSAN_INTERCEPT().
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int TSAN_INTERCEPTOR_SETJMP(void *env);
extern "C" int TSAN_INTERCEPTOR_SETJMP(void *env) {
  CHECK(0);
  return 0;
}
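
// Note (editorial): the actual setjmp/sigsetjmp interception presumably
// happens in the per-architecture assembly trampolines (e.g.
// tsan_rtl_amd64.S), which capture the stack pointer, call __tsan_setjmp()
// above, and then tail-call the real libc routine. The C++ "interceptors"
// defined here and below exist only so that TSAN_INTERCEPT() has symbols to
// patch; they must never actually execute, hence the CHECK(0).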

// FIXME: any reason to have a separate declaration?
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int __interceptor__setjmp(void *env);
extern "C" int __interceptor__setjmp(void *env) {
  CHECK(0);
  return 0;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int TSAN_INTERCEPTOR_SIGSETJMP(void *env);
extern "C" int TSAN_INTERCEPTOR_SIGSETJMP(void *env) {
  CHECK(0);
  return 0;
}

#if !SANITIZER_NETBSD
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int __interceptor___sigsetjmp(void *env);
extern "C" int __interceptor___sigsetjmp(void *env) {
  CHECK(0);
  return 0;
}
#endif

extern "C" int setjmp_symname(void *env);
extern "C" int _setjmp(void *env);
extern "C" int sigsetjmp_symname(void *env);
#if !SANITIZER_NETBSD
extern "C" int __sigsetjmp(void *env);
#endif
DEFINE_REAL(int, setjmp_symname, void *env)
DEFINE_REAL(int, _setjmp, void *env)
DEFINE_REAL(int, sigsetjmp_symname, void *env)
#if !SANITIZER_NETBSD
DEFINE_REAL(int, __sigsetjmp, void *env)
#endif
#endif  // SANITIZER_MAC

#if SANITIZER_NETBSD
#define longjmp_symname __longjmp14
#define siglongjmp_symname __siglongjmp14
#else
#define longjmp_symname longjmp
#define siglongjmp_symname siglongjmp
#endif

TSAN_INTERCEPTOR(void, longjmp_symname, uptr *env, int val) {
  // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
  // bad things will happen. We will jump over ScopedInterceptor dtor and can
  // leave thr->in_ignored_lib set.
  {
    SCOPED_INTERCEPTOR_RAW(longjmp_symname, env, val);
  }
  LongJmp(cur_thread(), env);
  REAL(longjmp_symname)(env, val);
}

TSAN_INTERCEPTOR(void, siglongjmp_symname, uptr *env, int val) {
  {
    SCOPED_INTERCEPTOR_RAW(siglongjmp_symname, env, val);
  }
  LongJmp(cur_thread(), env);
  REAL(siglongjmp_symname)(env, val);
}

#if SANITIZER_NETBSD
TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) {
  {
    SCOPED_INTERCEPTOR_RAW(_longjmp, env, val);
  }
  LongJmp(cur_thread(), env);
  REAL(_longjmp)(env, val);
}
#endif

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(void*, malloc, uptr size) {
  if (in_symbolizer())
    return InternalAlloc(size);
  void *p = 0;
  {
    SCOPED_INTERCEPTOR_RAW(malloc, size);
    p = user_alloc(thr, pc, size);
  }
  invoke_malloc_hook(p, size);
  return p;
}

// In glibc<2.25, dynamic TLS blocks are allocated by __libc_memalign. Intercept
// __libc_memalign so that (1) we can detect races (2) free will not be called
// on libc internally allocated blocks.
TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
  SCOPED_INTERCEPTOR_RAW(__libc_memalign, align, sz);
  return user_memalign(thr, pc, align, sz);
}

TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
  if (in_symbolizer())
    return InternalCalloc(size, n);
  void *p = 0;
  {
    SCOPED_INTERCEPTOR_RAW(calloc, size, n);
    p = user_calloc(thr, pc, size, n);
  }
  invoke_malloc_hook(p, n * size);
  return p;
}

TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
  if (in_symbolizer())
    return InternalRealloc(p, size);
  if (p)
    invoke_free_hook(p);
  {
    SCOPED_INTERCEPTOR_RAW(realloc, p, size);
    p = user_realloc(thr, pc, p, size);
  }
  invoke_malloc_hook(p, size);
  return p;
}

TSAN_INTERCEPTOR(void*, reallocarray, void *p, uptr size, uptr n) {
  if (in_symbolizer())
    return InternalReallocArray(p, size, n);
  if (p)
    invoke_free_hook(p);
  {
    SCOPED_INTERCEPTOR_RAW(reallocarray, p, size, n);
    p = user_reallocarray(thr, pc, p, size, n);
  }
  invoke_malloc_hook(p, size);
  return p;
}

TSAN_INTERCEPTOR(void, free, void *p) {
  if (p == 0)
    return;
  if (in_symbolizer())
    return InternalFree(p);
  invoke_free_hook(p);
  SCOPED_INTERCEPTOR_RAW(free, p);
  user_free(thr, pc, p);
}

TSAN_INTERCEPTOR(void, cfree, void *p) {
  if (p == 0)
    return;
  if (in_symbolizer())
    return InternalFree(p);
  invoke_free_hook(p);
  SCOPED_INTERCEPTOR_RAW(cfree, p);
  user_free(thr, pc, p);
}

TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
  SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
  return user_alloc_usable_size(p);
}
#endif

TSAN_INTERCEPTOR(char *, strcpy, char *dst, const char *src) {
  SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src);
  uptr srclen = internal_strlen(src);
  MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true);
  MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false);
  return REAL(strcpy)(dst, src);
}

TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) {
  SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
  uptr srclen = internal_strnlen(src, n);
  MemoryAccessRange(thr, pc, (uptr)dst, n, true);
  MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false);
  return REAL(strncpy)(dst, src, n);
}

TSAN_INTERCEPTOR(char*, strdup, const char *str) {
  SCOPED_TSAN_INTERCEPTOR(strdup, str);
  // strdup will call malloc, so no instrumentation is required here.
  return REAL(strdup)(str);
}

// Zero out addr if it points into shadow memory and was provided as a hint
// only, i.e., MAP_FIXED is not set.
static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
  if (*addr) {
    if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
      if (flags & MAP_FIXED) {
        errno = errno_EINVAL;
        return false;
      } else {
        *addr = 0;
      }
    }
  }
  return true;
}

template <class Mmap>
static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
                              void *addr, SIZE_T sz, int prot, int flags,
                              int fd, OFF64_T off) {
  if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED;
  void *res = real_mmap(addr, sz, prot, flags, fd, off);
  if (res != MAP_FAILED) {
    if (!IsAppMem((uptr)res) || !IsAppMem((uptr)res + sz - 1)) {
      Report("ThreadSanitizer: mmap at bad address: addr=%p size=%p res=%p\n",
             addr, (void*)sz, res);
      Die();
    }
    if (fd > 0) FdAccess(thr, pc, fd);
    MemoryRangeImitateWriteOrResetRange(thr, pc, (uptr)res, sz);
  }
  return res;
}

TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
  SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
  UnmapShadow(thr, (uptr)addr, sz);
  int res = REAL(munmap)(addr, sz);
  return res;
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
  SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
  return user_memalign(thr, pc, align, sz);
}
#define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
#else
#define TSAN_MAYBE_INTERCEPT_MEMALIGN
#endif

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
  if (in_symbolizer())
    return InternalAlloc(sz, nullptr, align);
  SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
  return user_aligned_alloc(thr, pc, align, sz);
}

TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
  if (in_symbolizer())
    return InternalAlloc(sz, nullptr, GetPageSizeCached());
  SCOPED_INTERCEPTOR_RAW(valloc, sz);
  return user_valloc(thr, pc, sz);
}
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
  if (in_symbolizer()) {
    uptr PageSize = GetPageSizeCached();
    sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
    return InternalAlloc(sz, nullptr, PageSize);
  }
  SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
  return user_pvalloc(thr, pc, sz);
}
#define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
#else
#define TSAN_MAYBE_INTERCEPT_PVALLOC
#endif

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
  if (in_symbolizer()) {
    void *p = InternalAlloc(sz, nullptr, align);
    if (!p)
      return errno_ENOMEM;
    *memptr = p;
    return 0;
  }
  SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
  return user_posix_memalign(thr, pc, memptr, align, sz);
}
#endif

// Both __cxa_guard_acquire and pthread_once 0-initialize
// the object initially. pthread_once does not have any
// other ABI requirements. __cxa_guard_acquire assumes
// that any non-0 value in the first byte means that
// initialization is completed. Contents of the remaining
// bytes are up to us.
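//
// Rough sketch of the guard state machine implemented by guard_acquire() and
// guard_release() below (using the constants defined next):
//
//   kGuardInit ---(CAS by the initializing thread)--> kGuardRunning
//   kGuardRunning | kGuardWaiter: other threads set the waiter bit and
//       FutexWait until they are woken.
//   kGuardRunning --(guard_release on success)--> kGuardDone
//   kGuardRunning --(guard_release on __cxa_guard_abort)--> kGuardInit
//
// The Done transition also publishes the initialized data: guard_release()
// performs Release(g) and a later guard_acquire() that observes kGuardDone
// performs Acquire(g), which is what makes function-local statics appear
// race-free to tsan.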
constexpr u32 kGuardInit = 0;
constexpr u32 kGuardDone = 1;
constexpr u32 kGuardRunning = 1 << 16;
constexpr u32 kGuardWaiter = 1 << 17;

static int guard_acquire(ThreadState *thr, uptr pc, atomic_uint32_t *g,
                         bool blocking_hooks = true) {
  if (blocking_hooks)
    OnPotentiallyBlockingRegionBegin();
  auto on_exit = at_scope_exit([blocking_hooks] {
    if (blocking_hooks)
      OnPotentiallyBlockingRegionEnd();
  });

  for (;;) {
    u32 cmp = atomic_load(g, memory_order_acquire);
    if (cmp == kGuardInit) {
      if (atomic_compare_exchange_strong(g, &cmp, kGuardRunning,
                                         memory_order_relaxed))
        return 1;
    } else if (cmp == kGuardDone) {
      if (!thr->in_ignored_lib)
        Acquire(thr, pc, (uptr)g);
      return 0;
    } else {
      if ((cmp & kGuardWaiter) ||
          atomic_compare_exchange_strong(g, &cmp, cmp | kGuardWaiter,
                                         memory_order_relaxed))
        FutexWait(g, cmp | kGuardWaiter);
    }
  }
}

static void guard_release(ThreadState *thr, uptr pc, atomic_uint32_t *g,
                          u32 v) {
  if (!thr->in_ignored_lib)
    Release(thr, pc, (uptr)g);
  u32 old = atomic_exchange(g, v, memory_order_release);
  if (old & kGuardWaiter)
    FutexWake(g, 1 << 30);
}

// __cxa_guard_acquire and friends need to be intercepted in a special way -
// regular interceptors will break statically-linked libstdc++. Linux
// interceptors are specially defined as weak functions (so that they don't
// cause link errors when the user defines them as well). So they silently
// auto-disable themselves when such a symbol is already present in the binary.
// If we link libstdc++ statically, it will bring its own __cxa_guard_acquire
// which will silently replace our interceptor. That's why on Linux we simply
// export these interceptors with INTERFACE_ATTRIBUTE.
// On OS X, we don't support statically linking, so we just use a regular
// interceptor.
#if SANITIZER_MAC
#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
#else
#define STDCXX_INTERCEPTOR(rettype, name, ...) \
  extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
#endif

// Used in thread-safe function static initialization.
STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
  SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
  return guard_acquire(thr, pc, g);
}

STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
  SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
  guard_release(thr, pc, g, kGuardDone);
}

STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
  SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
  guard_release(thr, pc, g, kGuardInit);
}

namespace __tsan {
void DestroyThreadState() {
  ThreadState *thr = cur_thread();
  Processor *proc = thr->proc();
  ThreadFinish(thr);
  ProcUnwire(proc, thr);
  ProcDestroy(proc);
  DTLS_Destroy();
  cur_thread_finalize();
}

void PlatformCleanUpThreadState(ThreadState *thr) {
  ThreadSignalContext *sctx = thr->signal_ctx;
  if (sctx) {
    thr->signal_ctx = 0;
    UnmapOrDie(sctx, sizeof(*sctx));
  }
}
}  // namespace __tsan

#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
static void thread_finalize(void *v) {
  uptr iter = (uptr)v;
  if (iter > 1) {
    if (pthread_setspecific(interceptor_ctx()->finalize_key,
        (void*)(iter - 1))) {
      Printf("ThreadSanitizer: failed to set thread key\n");
      Die();
    }
    return;
  }
  DestroyThreadState();
}
#endif


struct ThreadParam {
  void* (*callback)(void *arg);
  void *param;
  Tid tid;
  Semaphore created;
  Semaphore started;
};

extern "C" void *__tsan_thread_start_func(void *arg) {
  ThreadParam *p = (ThreadParam*)arg;
  void* (*callback)(void *arg) = p->callback;
  void *param = p->param;
  {
    ThreadState *thr = cur_thread_init();
    // Thread-local state is not initialized yet.
    ScopedIgnoreInterceptors ignore;
#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
    ThreadIgnoreBegin(thr, 0);
    if (pthread_setspecific(interceptor_ctx()->finalize_key,
                            (void *)GetPthreadDestructorIterations())) {
      Printf("ThreadSanitizer: failed to set thread key\n");
      Die();
    }
    ThreadIgnoreEnd(thr);
#endif
    p->created.Wait();
    Processor *proc = ProcCreate();
    ProcWire(proc, thr);
    ThreadStart(thr, p->tid, GetTid(), ThreadType::Regular);
    p->started.Post();
  }
  void *res = callback(param);
  // Prevent the callback from being tail called,
  // it mixes up stack traces.
  volatile int foo = 42;
  foo++;
  return res;
}

TSAN_INTERCEPTOR(int, pthread_create,
    void *th, void *attr, void *(*callback)(void*), void * param) {
  SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);

  MaybeSpawnBackgroundThread();

  if (ctx->after_multithreaded_fork) {
    if (flags()->die_after_fork) {
      Report("ThreadSanitizer: starting new threads after multi-threaded "
             "fork is not supported. Dying (set die_after_fork=0 to override)\n");
      Die();
    } else {
      VPrintf(1,
              "ThreadSanitizer: starting new threads after multi-threaded "
              "fork is not supported (pid %lu). Continuing because of "
              "die_after_fork=0, but you are on your own\n",
              internal_getpid());
    }
  }
  __sanitizer_pthread_attr_t myattr;
  if (attr == 0) {
    pthread_attr_init(&myattr);
    attr = &myattr;
  }
  int detached = 0;
  REAL(pthread_attr_getdetachstate)(attr, &detached);
  AdjustStackSize(attr);

  ThreadParam p;
  p.callback = callback;
  p.param = param;
  p.tid = kMainTid;
  int res = -1;
  {
    // Otherwise we see false positives in pthread stack manipulation.
    ScopedIgnoreInterceptors ignore;
    ThreadIgnoreBegin(thr, pc);
    res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
    ThreadIgnoreEnd(thr);
  }
  if (res == 0) {
    p.tid = ThreadCreate(thr, pc, *(uptr *)th, IsStateDetached(detached));
    CHECK_NE(p.tid, kMainTid);
    // Synchronization on p.tid serves two purposes:
    // 1. ThreadCreate must finish before the new thread starts.
    //    Otherwise the new thread can call pthread_detach, but the pthread_t
    //    identifier is not yet registered in ThreadRegistry by ThreadCreate.
    // 2. ThreadStart must finish before this thread continues.
    //    Otherwise, this thread can call pthread_detach and reset thr->sync
    //    before the new thread got a chance to acquire from it in ThreadStart.
    p.created.Post();
    p.started.Wait();
  }
  if (attr == &myattr)
    pthread_attr_destroy(&myattr);
  return res;
}

TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
  SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
  Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
  ThreadIgnoreBegin(thr, pc);
  int res = BLOCK_REAL(pthread_join)(th, ret);
  ThreadIgnoreEnd(thr);
  if (res == 0) {
    ThreadJoin(thr, pc, tid);
  }
  return res;
}

DEFINE_REAL_PTHREAD_FUNCTIONS

TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
  SCOPED_INTERCEPTOR_RAW(pthread_detach, th);
  Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
  int res = REAL(pthread_detach)(th);
  if (res == 0) {
    ThreadDetach(thr, pc, tid);
  }
  return res;
}

TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
  {
    SCOPED_INTERCEPTOR_RAW(pthread_exit, retval);
#if !SANITIZER_MAC && !SANITIZER_ANDROID
    CHECK_EQ(thr, &cur_thread_placeholder);
#endif
  }
  REAL(pthread_exit)(retval);
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
  SCOPED_INTERCEPTOR_RAW(pthread_tryjoin_np, th, ret);
  Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
  ThreadIgnoreBegin(thr, pc);
  int res = REAL(pthread_tryjoin_np)(th, ret);
  ThreadIgnoreEnd(thr);
  if (res == 0)
    ThreadJoin(thr, pc, tid);
  else
    ThreadNotJoined(thr, pc, tid, (uptr)th);
  return res;
}

TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
                 const struct timespec *abstime) {
  SCOPED_INTERCEPTOR_RAW(pthread_timedjoin_np, th, ret, abstime);
  Tid tid = ThreadConsumeTid(thr, pc, (uptr)th);
  ThreadIgnoreBegin(thr, pc);
  int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
  ThreadIgnoreEnd(thr);
  if (res == 0)
    ThreadJoin(thr, pc, tid);
  else
    ThreadNotJoined(thr, pc, tid, (uptr)th);
  return res;
}
#endif

// Problem:
// NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2).
// pthread_cond_t has different size in the different versions.
// If we call new REAL functions for old pthread_cond_t, they will corrupt
// memory after pthread_cond_t (old cond is smaller).
// If we call old REAL functions for new pthread_cond_t, we will lose some
// functionality (e.g. old functions do not support waiting against
// CLOCK_REALTIME).
// Proper handling would require to have 2 versions of interceptors as well.
// But this is messy, in particular requires linker scripts when sanitizer
// runtime is linked into a shared library.
// Instead we assume we don't have dynamic libraries built against old
// pthread (2.2.5 dates back to 2002). And provide legacy_pthread_cond flag
// that allows TSan to work with old libraries (but this mode does not support
// some features, e.g. pthread_condattr_getpshared).
static void *init_cond(void *c, bool force = false) {
  // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
  // So we allocate additional memory on the side large enough to hold
  // any pthread_cond_t object. Always call new REAL functions, but pass
  // the aux object to them.
  // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
  // first word of pthread_cond_t to zero.
  // It's all relevant only for linux.
  if (!common_flags()->legacy_pthread_cond)
    return c;
  atomic_uintptr_t *p = (atomic_uintptr_t*)c;
  uptr cond = atomic_load(p, memory_order_acquire);
  if (!force && cond != 0)
    return (void*)cond;
  void *newcond = WRAP(malloc)(pthread_cond_t_sz);
  internal_memset(newcond, 0, pthread_cond_t_sz);
  if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
      memory_order_acq_rel))
    return newcond;
  WRAP(free)(newcond);
  return (void*)cond;
}

namespace {

template <class Fn>
struct CondMutexUnlockCtx {
  ScopedInterceptor *si;
  ThreadState *thr;
  uptr pc;
  void *m;
  void *c;
  const Fn &fn;

  int Cancel() const { return fn(); }
  void Unlock() const;
};

template <class Fn>
void CondMutexUnlockCtx<Fn>::Unlock() const {
  // pthread_cond_wait interceptor has enabled async signal delivery
  // (see BlockingCall below). Disable async signals since we are running
  // tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
  // since the thread is cancelled, so we have to manually execute them
  // (the thread still can run some user code due to pthread_cleanup_push).
  ThreadSignalContext *ctx = SigCtx(thr);
  CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
  atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
  MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
  // Undo BlockingCall ctor effects.
  thr->ignore_interceptors--;
  si->~ScopedInterceptor();
}
}  // namespace

INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
  void *cond = init_cond(c, true);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
  return REAL(pthread_cond_init)(cond, a);
}

template <class Fn>
int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si, const Fn &fn,
              void *c, void *m) {
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
  MutexUnlock(thr, pc, (uptr)m);
  int res = 0;
  // This ensures that we handle mutex lock even in case of pthread_cancel.
  // See test/tsan/cond_cancel.cpp.
  {
    // Enable signal delivery while the thread is blocked.
    BlockingCall bc(thr);
    CondMutexUnlockCtx<Fn> arg = {si, thr, pc, m, c, fn};
    res = call_pthread_cancel_with_cleanup(
        [](void *arg) -> int {
          return ((const CondMutexUnlockCtx<Fn> *)arg)->Cancel();
        },
        [](void *arg) { ((const CondMutexUnlockCtx<Fn> *)arg)->Unlock(); },
        &arg);
  }
  if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
  MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
  return res;
}

INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
  return cond_wait(
      thr, pc, &si, [=]() { return REAL(pthread_cond_wait)(cond, m); }, cond,
      m);
}

INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
  return cond_wait(
      thr, pc, &si,
      [=]() { return REAL(pthread_cond_timedwait)(cond, m, abstime); }, cond,
      m);
}

#if SANITIZER_LINUX
INTERCEPTOR(int, pthread_cond_clockwait, void *c, void *m,
            __sanitizer_clockid_t clock, void *abstime) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_clockwait, cond, m, clock, abstime);
  return cond_wait(
      thr, pc, &si,
      [=]() { return REAL(pthread_cond_clockwait)(cond, m, clock, abstime); },
      cond, m);
}
#define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT TSAN_INTERCEPT(pthread_cond_clockwait)
#else
#define TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT
#endif

#if SANITIZER_MAC
INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
            void *reltime) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
  return cond_wait(
      thr, pc, &si,
      [=]() {
        return REAL(pthread_cond_timedwait_relative_np)(cond, m, reltime);
      },
      cond, m);
}
#endif

INTERCEPTOR(int, pthread_cond_signal, void *c) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
  return REAL(pthread_cond_signal)(cond);
}

INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
  return REAL(pthread_cond_broadcast)(cond);
}

INTERCEPTOR(int, pthread_cond_destroy, void *c) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
  int res = REAL(pthread_cond_destroy)(cond);
  if (common_flags()->legacy_pthread_cond) {
    // Free our aux cond and zero the pointer to not leave dangling pointers.
    WRAP(free)(cond);
    atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
  int res = REAL(pthread_mutex_init)(m, a);
  if (res == 0) {
    u32 flagz = 0;
    if (a) {
      int type = 0;
      if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
        if (type == PTHREAD_MUTEX_RECURSIVE ||
            type == PTHREAD_MUTEX_RECURSIVE_NP)
          flagz |= MutexFlagWriteReentrant;
    }
    MutexCreate(thr, pc, (uptr)m, flagz);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
  int res = REAL(pthread_mutex_destroy)(m);
  if (res == 0 || res == errno_EBUSY) {
    MutexDestroy(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
  int res = REAL(pthread_mutex_trylock)(m);
  if (res == errno_EOWNERDEAD)
    MutexRepair(thr, pc, (uptr)m);
  if (res == 0 || res == errno_EOWNERDEAD)
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
  int res = REAL(pthread_mutex_timedlock)(m, abstime);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}
#endif

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
  int res = REAL(pthread_spin_init)(m, pshared);
  if (res == 0) {
    MutexCreate(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
  int res = REAL(pthread_spin_destroy)(m);
  if (res == 0) {
    MutexDestroy(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
  MutexPreLock(thr, pc, (uptr)m);
  int res = REAL(pthread_spin_lock)(m);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
  int res = REAL(pthread_spin_trylock)(m);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
  MutexUnlock(thr, pc, (uptr)m);
  int res = REAL(pthread_spin_unlock)(m);
  return res;
}
#endif

TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
  int res = REAL(pthread_rwlock_init)(m, a);
  if (res == 0) {
    MutexCreate(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
  int res = REAL(pthread_rwlock_destroy)(m);
  if (res == 0) {
    MutexDestroy(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
  MutexPreReadLock(thr, pc, (uptr)m);
  int res = REAL(pthread_rwlock_rdlock)(m);
  if (res == 0) {
    MutexPostReadLock(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
  int res = REAL(pthread_rwlock_tryrdlock)(m);
  if (res == 0) {
    MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
  int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
  if (res == 0) {
    MutexPostReadLock(thr, pc, (uptr)m);
  }
  return res;
}
#endif

TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
  MutexPreLock(thr, pc, (uptr)m);
  int res = REAL(pthread_rwlock_wrlock)(m);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
  int res = REAL(pthread_rwlock_trywrlock)(m);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
  int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}
#endif

TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
  MutexReadOrWriteUnlock(thr, pc, (uptr)m);
  int res = REAL(pthread_rwlock_unlock)(m);
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
  SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
  MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
  int res = REAL(pthread_barrier_init)(b, a, count);
  return res;
}

TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
  SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
  MemoryAccess(thr, pc, (uptr)b, 1, kAccessWrite);
  int res = REAL(pthread_barrier_destroy)(b);
  return res;
}

TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
  SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
  Release(thr, pc, (uptr)b);
  MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
  int res = REAL(pthread_barrier_wait)(b);
  MemoryAccess(thr, pc, (uptr)b, 1, kAccessRead);
  if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
    Acquire(thr, pc, (uptr)b);
  }
  return res;
}
#endif

TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
  SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
  if (o == 0 || f == 0)
    return errno_EINVAL;
  atomic_uint32_t *a;

  if (SANITIZER_MAC)
    a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
  else if (SANITIZER_NETBSD)
    a = static_cast<atomic_uint32_t*>
          ((void *)((char *)o + __sanitizer::pthread_mutex_t_sz));
  else
    a = static_cast<atomic_uint32_t*>(o);

  // Mac OS X appears to use pthread_once() where calling BlockingRegion hooks
  // results in crashes due to too little stack space.
  if (guard_acquire(thr, pc, a, !SANITIZER_MAC)) {
    (*f)();
    guard_release(thr, pc, a, kGuardDone);
  }
  return 0;
}

#if SANITIZER_GLIBC
TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(__fxstat)(version, fd, buf);
}
#define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
#else
#define TSAN_MAYBE_INTERCEPT___FXSTAT
#endif

TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
#if SANITIZER_GLIBC
  SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(__fxstat)(0, fd, buf);
#else
  SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(fstat)(fd, buf);
#endif
}

#if SANITIZER_GLIBC
TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(__fxstat64)(version, fd, buf);
}
#define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
#else
#define TSAN_MAYBE_INTERCEPT___FXSTAT64
#endif

#if SANITIZER_GLIBC
TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(__fxstat64)(0, fd, buf);
}
#define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
#else
#define TSAN_MAYBE_INTERCEPT_FSTAT64
#endif

TSAN_INTERCEPTOR(int, open, const char *name, int oflag, ...) {
  va_list ap;
  va_start(ap, oflag);
  mode_t mode = va_arg(ap, int);
  va_end(ap);
  SCOPED_TSAN_INTERCEPTOR(open, name, oflag, mode);
  READ_STRING(thr, pc, name, 0);
  int fd = REAL(open)(name, oflag, mode);
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, open64, const char *name, int oflag, ...) {
  va_list ap;
  va_start(ap, oflag);
  mode_t mode = va_arg(ap, int);
  va_end(ap);
  SCOPED_TSAN_INTERCEPTOR(open64, name, oflag, mode);
  READ_STRING(thr, pc, name, 0);
  int fd = REAL(open64)(name, oflag, mode);
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
#else
#define TSAN_MAYBE_INTERCEPT_OPEN64
#endif

TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
  SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
  READ_STRING(thr, pc, name, 0);
  int fd = REAL(creat)(name, mode);
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
  SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
  READ_STRING(thr, pc, name, 0);
  int fd = REAL(creat64)(name, mode);
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
#else
#define TSAN_MAYBE_INTERCEPT_CREAT64
#endif

TSAN_INTERCEPTOR(int, dup, int oldfd) {
  SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
  int newfd = REAL(dup)(oldfd);
  if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
    FdDup(thr, pc, oldfd, newfd, true);
  return newfd;
}

TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
  SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
  int newfd2 = REAL(dup2)(oldfd, newfd);
  if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
    FdDup(thr, pc, oldfd, newfd2, false);
  return newfd2;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
  SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
  int newfd2 = REAL(dup3)(oldfd, newfd, flags);
  if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
    FdDup(thr, pc, oldfd, newfd2, false);
  return newfd2;
}
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
  SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
  int fd = REAL(eventfd)(initval, flags);
  if (fd >= 0)
    FdEventCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
#else
#define TSAN_MAYBE_INTERCEPT_EVENTFD
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
  SCOPED_INTERCEPTOR_RAW(signalfd, fd, mask, flags);
  FdClose(thr, pc, fd);
  fd = REAL(signalfd)(fd, mask, flags);
  if (!MustIgnoreInterceptor(thr))
    FdSignalCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
#else
#define TSAN_MAYBE_INTERCEPT_SIGNALFD
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, inotify_init, int fake) {
  SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
  int fd = REAL(inotify_init)(fake);
  if (fd >= 0)
    FdInotifyCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
#else
#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
  SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
  int fd = REAL(inotify_init1)(flags);
  if (fd >= 0)
    FdInotifyCreate(thr, pc, fd);
  return fd;
}
TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1) 1723 #else 1724 #define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 1725 #endif 1726 1727 TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) { 1728 SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol); 1729 int fd = REAL(socket)(domain, type, protocol); 1730 if (fd >= 0) 1731 FdSocketCreate(thr, pc, fd); 1732 return fd; 1733 } 1734 1735 TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) { 1736 SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd); 1737 int res = REAL(socketpair)(domain, type, protocol, fd); 1738 if (res == 0 && fd[0] >= 0 && fd[1] >= 0) 1739 FdPipeCreate(thr, pc, fd[0], fd[1]); 1740 return res; 1741 } 1742 1743 TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) { 1744 SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen); 1745 FdSocketConnecting(thr, pc, fd); 1746 int res = REAL(connect)(fd, addr, addrlen); 1747 if (res == 0 && fd >= 0) 1748 FdSocketConnect(thr, pc, fd); 1749 return res; 1750 } 1751 1752 TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) { 1753 SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen); 1754 int res = REAL(bind)(fd, addr, addrlen); 1755 if (fd > 0 && res == 0) 1756 FdAccess(thr, pc, fd); 1757 return res; 1758 } 1759 1760 TSAN_INTERCEPTOR(int, listen, int fd, int backlog) { 1761 SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog); 1762 int res = REAL(listen)(fd, backlog); 1763 if (fd > 0 && res == 0) 1764 FdAccess(thr, pc, fd); 1765 return res; 1766 } 1767 1768 TSAN_INTERCEPTOR(int, close, int fd) { 1769 SCOPED_INTERCEPTOR_RAW(close, fd); 1770 FdClose(thr, pc, fd); 1771 return REAL(close)(fd); 1772 } 1773 1774 #if SANITIZER_LINUX 1775 TSAN_INTERCEPTOR(int, __close, int fd) { 1776 SCOPED_INTERCEPTOR_RAW(__close, fd); 1777 FdClose(thr, pc, fd); 1778 return REAL(__close)(fd); 1779 } 1780 #define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close) 1781 #else 1782 #define TSAN_MAYBE_INTERCEPT___CLOSE 1783 #endif 1784 1785 // glibc guts 1786 #if SANITIZER_LINUX && !SANITIZER_ANDROID 1787 TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) { 1788 SCOPED_INTERCEPTOR_RAW(__res_iclose, state, free_addr); 1789 int fds[64]; 1790 int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds)); 1791 for (int i = 0; i < cnt; i++) FdClose(thr, pc, fds[i]); 1792 REAL(__res_iclose)(state, free_addr); 1793 } 1794 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose) 1795 #else 1796 #define TSAN_MAYBE_INTERCEPT___RES_ICLOSE 1797 #endif 1798 1799 TSAN_INTERCEPTOR(int, pipe, int *pipefd) { 1800 SCOPED_TSAN_INTERCEPTOR(pipe, pipefd); 1801 int res = REAL(pipe)(pipefd); 1802 if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0) 1803 FdPipeCreate(thr, pc, pipefd[0], pipefd[1]); 1804 return res; 1805 } 1806 1807 #if !SANITIZER_MAC 1808 TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) { 1809 SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags); 1810 int res = REAL(pipe2)(pipefd, flags); 1811 if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0) 1812 FdPipeCreate(thr, pc, pipefd[0], pipefd[1]); 1813 return res; 1814 } 1815 #endif 1816 1817 TSAN_INTERCEPTOR(int, unlink, char *path) { 1818 SCOPED_TSAN_INTERCEPTOR(unlink, path); 1819 Release(thr, pc, File2addr(path)); 1820 int res = REAL(unlink)(path); 1821 return res; 1822 } 1823 1824 TSAN_INTERCEPTOR(void*, tmpfile, int fake) { 1825 SCOPED_TSAN_INTERCEPTOR(tmpfile, fake); 1826 void *res = REAL(tmpfile)(fake); 1827 if (res) { 1828 int fd = fileno_unlocked(res); 1829 if (fd >= 0) 1830 
FdFileCreate(thr, pc, fd); 1831 } 1832 return res; 1833 } 1834 1835 #if SANITIZER_LINUX 1836 TSAN_INTERCEPTOR(void*, tmpfile64, int fake) { 1837 SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake); 1838 void *res = REAL(tmpfile64)(fake); 1839 if (res) { 1840 int fd = fileno_unlocked(res); 1841 if (fd >= 0) 1842 FdFileCreate(thr, pc, fd); 1843 } 1844 return res; 1845 } 1846 #define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64) 1847 #else 1848 #define TSAN_MAYBE_INTERCEPT_TMPFILE64 1849 #endif 1850 1851 static void FlushStreams() { 1852 // Flushing all the streams here may freeze the process if a child thread is 1853 // performing file stream operations at the same time. 1854 REAL(fflush)(stdout); 1855 REAL(fflush)(stderr); 1856 } 1857 1858 TSAN_INTERCEPTOR(void, abort, int fake) { 1859 SCOPED_TSAN_INTERCEPTOR(abort, fake); 1860 FlushStreams(); 1861 REAL(abort)(fake); 1862 } 1863 1864 TSAN_INTERCEPTOR(int, rmdir, char *path) { 1865 SCOPED_TSAN_INTERCEPTOR(rmdir, path); 1866 Release(thr, pc, Dir2addr(path)); 1867 int res = REAL(rmdir)(path); 1868 return res; 1869 } 1870 1871 TSAN_INTERCEPTOR(int, closedir, void *dirp) { 1872 SCOPED_INTERCEPTOR_RAW(closedir, dirp); 1873 if (dirp) { 1874 int fd = dirfd(dirp); 1875 FdClose(thr, pc, fd); 1876 } 1877 return REAL(closedir)(dirp); 1878 } 1879 1880 #if SANITIZER_LINUX 1881 TSAN_INTERCEPTOR(int, epoll_create, int size) { 1882 SCOPED_TSAN_INTERCEPTOR(epoll_create, size); 1883 int fd = REAL(epoll_create)(size); 1884 if (fd >= 0) 1885 FdPollCreate(thr, pc, fd); 1886 return fd; 1887 } 1888 1889 TSAN_INTERCEPTOR(int, epoll_create1, int flags) { 1890 SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags); 1891 int fd = REAL(epoll_create1)(flags); 1892 if (fd >= 0) 1893 FdPollCreate(thr, pc, fd); 1894 return fd; 1895 } 1896 1897 TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) { 1898 SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev); 1899 if (epfd >= 0) 1900 FdAccess(thr, pc, epfd); 1901 if (epfd >= 0 && fd >= 0) 1902 FdAccess(thr, pc, fd); 1903 if (op == EPOLL_CTL_ADD && epfd >= 0) 1904 FdRelease(thr, pc, epfd); 1905 int res = REAL(epoll_ctl)(epfd, op, fd, ev); 1906 return res; 1907 } 1908 1909 TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) { 1910 SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout); 1911 if (epfd >= 0) 1912 FdAccess(thr, pc, epfd); 1913 int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout); 1914 if (res > 0 && epfd >= 0) 1915 FdAcquire(thr, pc, epfd); 1916 return res; 1917 } 1918 1919 TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout, 1920 void *sigmask) { 1921 SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask); 1922 if (epfd >= 0) 1923 FdAccess(thr, pc, epfd); 1924 int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask); 1925 if (res > 0 && epfd >= 0) 1926 FdAcquire(thr, pc, epfd); 1927 return res; 1928 } 1929 1930 #define TSAN_MAYBE_INTERCEPT_EPOLL \ 1931 TSAN_INTERCEPT(epoll_create); \ 1932 TSAN_INTERCEPT(epoll_create1); \ 1933 TSAN_INTERCEPT(epoll_ctl); \ 1934 TSAN_INTERCEPT(epoll_wait); \ 1935 TSAN_INTERCEPT(epoll_pwait) 1936 #else 1937 #define TSAN_MAYBE_INTERCEPT_EPOLL 1938 #endif 1939 1940 // The following functions are intercepted merely to process pending signals. 1941 // If program blocks signal X, we must deliver the signal before the function 1942 // returns. Similarly, if program unblocks a signal (or returns from sigsuspend) 1943 // it's better to deliver the signal straight away. 
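// To make this concrete, here is a minimal, illustrative user-level program
// (not part of the runtime) whose correctness depends on prompt delivery; it
// only shows the pattern these interceptors exist to support:
//
//   #include <assert.h>
//   #include <signal.h>
//   static volatile sig_atomic_t got_usr1;
//   static void on_usr1(int sig) { (void)sig; got_usr1 = 1; }
//   int main() {
//     sigset_t block_usr1, none;
//     sigemptyset(&block_usr1);
//     sigaddset(&block_usr1, SIGUSR1);
//     sigprocmask(SIG_BLOCK, &block_usr1, 0);  // keep SIGUSR1 pending
//     signal(SIGUSR1, on_usr1);
//     raise(SIGUSR1);                          // queued, not delivered yet
//     sigemptyset(&none);
//     sigsuspend(&none);  // atomically unblocks; handler runs before return
//     assert(got_usr1 == 1);
//     return 0;
//   }
//
// Under tsan such a handler may first be recorded in pending_signals (see
// sighandler() below); intercepting sigsuspend() and friends gives the
// runtime a point right after the real call where ProcessPendingSignals()
// can flush that queue, so the ordering the program relies on is preserved.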
1944 TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
1945   SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
1946   return REAL(sigsuspend)(mask);
1947 }
1948 
1949 TSAN_INTERCEPTOR(int, sigblock, int mask) {
1950   SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
1951   return REAL(sigblock)(mask);
1952 }
1953 
1954 TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
1955   SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
1956   return REAL(sigsetmask)(mask);
1957 }
1958 
1959 TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
1960                  __sanitizer_sigset_t *oldset) {
1961   SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
1962   return REAL(pthread_sigmask)(how, set, oldset);
1963 }
1964 
1965 namespace __tsan {
1966 
1967 static void ReportErrnoSpoiling(ThreadState *thr, uptr pc) {
1968   VarSizeStackTrace stack;
1969   // StackTrace::GetNextInstructionPc(pc) is used because a return address is
1970   // expected; OutputReport() will undo this.
1971   ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
1972   ThreadRegistryLock l(&ctx->thread_registry);
1973   ScopedReport rep(ReportTypeErrnoInSignal);
1974   if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
1975     rep.AddStack(stack, true);
1976     OutputReport(thr, rep);
1977   }
1978 }
1979 
1980 static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
1981                                   int sig, __sanitizer_siginfo *info,
1982                                   void *uctx) {
1983   CHECK(thr->slot);
1984   __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
1985   if (acquire)
1986     Acquire(thr, 0, (uptr)&sigactions[sig]);
1987   // Signals are generally asynchronous, so if we receive a signal when
1988   // ignores are enabled we should disable ignores. This is critical for sync
1989   // and interceptors, because otherwise we can miss synchronization and report
1990   // false races.
1991   int ignore_reads_and_writes = thr->ignore_reads_and_writes;
1992   int ignore_interceptors = thr->ignore_interceptors;
1993   int ignore_sync = thr->ignore_sync;
1994   // For the symbolizer we only process SIGSEGVs synchronously
1995   // (bug in symbolizer or in tsan). But we want to reset
1996   // in_symbolizer to fail gracefully. Symbolizer and user code
1997   // use different memory allocators, so if we don't reset
1998   // in_symbolizer we can get memory allocated with one being
1999   // freed with the other, which can cause more crashes.
2000   int in_symbolizer = thr->in_symbolizer;
2001   if (!ctx->after_multithreaded_fork) {
2002     thr->ignore_reads_and_writes = 0;
2003     thr->fast_state.ClearIgnoreBit();
2004     thr->ignore_interceptors = 0;
2005     thr->ignore_sync = 0;
2006     thr->in_symbolizer = 0;
2007   }
2008   // Ensure that the handler does not spoil errno.
2009   const int saved_errno = errno;
2010   errno = 99;
2011   // This code races with sigaction. Be careful to not read sa_sigaction twice.
2012   // Also need to remember pc for reporting before the call,
2013   // because the handler can reset it.
2014   volatile uptr pc = (sigactions[sig].sa_flags & SA_SIGINFO)
2015                          ? (uptr)sigactions[sig].sigaction
2016                          : (uptr)sigactions[sig].handler;
2017   if (pc != sig_dfl && pc != sig_ign) {
2018     // The callback can be either sa_handler or sa_sigaction.
2019     // They have different signatures, but we assume that passing
2020     // additional arguments to sa_handler works and is harmless.
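    // For reference, the two user-visible handler shapes that the single
    // indirect call below has to serve (a sketch using the standard POSIX
    // declarations, shown here only for illustration):
    //
    //   void handler(int sig);                              // sa_handler
    //   void action(int sig, siginfo_t *info, void *uctx);  // sa_sigaction
    //
    // We always call through the three-argument type; a plain sa_handler
    // never reads the two extra arguments, which is why the cast is assumed
    // to be harmless on the supported ABIs.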
2021 ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx); 2022 } 2023 if (!ctx->after_multithreaded_fork) { 2024 thr->ignore_reads_and_writes = ignore_reads_and_writes; 2025 if (ignore_reads_and_writes) 2026 thr->fast_state.SetIgnoreBit(); 2027 thr->ignore_interceptors = ignore_interceptors; 2028 thr->ignore_sync = ignore_sync; 2029 thr->in_symbolizer = in_symbolizer; 2030 } 2031 // We do not detect errno spoiling for SIGTERM, 2032 // because some SIGTERM handlers do spoil errno but reraise SIGTERM, 2033 // tsan reports false positive in such case. 2034 // It's difficult to properly detect this situation (reraise), 2035 // because in async signal processing case (when handler is called directly 2036 // from rtl_generic_sighandler) we have not yet received the reraised 2037 // signal; and it looks too fragile to intercept all ways to reraise a signal. 2038 if (ShouldReport(thr, ReportTypeErrnoInSignal) && !sync && sig != SIGTERM && 2039 errno != 99) 2040 ReportErrnoSpoiling(thr, pc); 2041 errno = saved_errno; 2042 } 2043 2044 void ProcessPendingSignalsImpl(ThreadState *thr) { 2045 atomic_store(&thr->pending_signals, 0, memory_order_relaxed); 2046 ThreadSignalContext *sctx = SigCtx(thr); 2047 if (sctx == 0) 2048 return; 2049 atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed); 2050 internal_sigfillset(&sctx->emptyset); 2051 int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset); 2052 CHECK_EQ(res, 0); 2053 for (int sig = 0; sig < kSigCount; sig++) { 2054 SignalDesc *signal = &sctx->pending_signals[sig]; 2055 if (signal->armed) { 2056 signal->armed = false; 2057 CallUserSignalHandler(thr, false, true, sig, &signal->siginfo, 2058 &signal->ctx); 2059 } 2060 } 2061 res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0); 2062 CHECK_EQ(res, 0); 2063 atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed); 2064 } 2065 2066 } // namespace __tsan 2067 2068 static bool is_sync_signal(ThreadSignalContext *sctx, int sig) { 2069 return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL || sig == SIGTRAP || 2070 sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS || 2071 // If we are sending signal to ourselves, we must process it now. 2072 (sctx && sig == sctx->int_signal_send); 2073 } 2074 2075 void sighandler(int sig, __sanitizer_siginfo *info, void *ctx) { 2076 ThreadState *thr = cur_thread_init(); 2077 ThreadSignalContext *sctx = SigCtx(thr); 2078 if (sig < 0 || sig >= kSigCount) { 2079 VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig); 2080 return; 2081 } 2082 // Don't mess with synchronous signals. 2083 const bool sync = is_sync_signal(sctx, sig); 2084 if (sync || 2085 // If we are in blocking function, we can safely process it now 2086 // (but check if we are in a recursive interceptor, 2087 // i.e. pthread_join()->munmap()). 2088 (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) { 2089 atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed); 2090 if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) { 2091 atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed); 2092 CallUserSignalHandler(thr, sync, true, sig, info, ctx); 2093 atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed); 2094 } else { 2095 // Be very conservative with when we do acquire in this case. 2096 // It's unsafe to do acquire in async handlers, because ThreadState 2097 // can be in inconsistent state. 
2098 // SIGSYS looks relatively safe -- it's synchronous and can actually 2099 // need some global state. 2100 bool acq = (sig == SIGSYS); 2101 CallUserSignalHandler(thr, sync, acq, sig, info, ctx); 2102 } 2103 atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed); 2104 return; 2105 } 2106 2107 if (sctx == 0) 2108 return; 2109 SignalDesc *signal = &sctx->pending_signals[sig]; 2110 if (signal->armed == false) { 2111 signal->armed = true; 2112 internal_memcpy(&signal->siginfo, info, sizeof(*info)); 2113 internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx)); 2114 atomic_store(&thr->pending_signals, 1, memory_order_relaxed); 2115 } 2116 } 2117 2118 TSAN_INTERCEPTOR(int, raise, int sig) { 2119 SCOPED_TSAN_INTERCEPTOR(raise, sig); 2120 ThreadSignalContext *sctx = SigCtx(thr); 2121 CHECK_NE(sctx, 0); 2122 int prev = sctx->int_signal_send; 2123 sctx->int_signal_send = sig; 2124 int res = REAL(raise)(sig); 2125 CHECK_EQ(sctx->int_signal_send, sig); 2126 sctx->int_signal_send = prev; 2127 return res; 2128 } 2129 2130 TSAN_INTERCEPTOR(int, kill, int pid, int sig) { 2131 SCOPED_TSAN_INTERCEPTOR(kill, pid, sig); 2132 ThreadSignalContext *sctx = SigCtx(thr); 2133 CHECK_NE(sctx, 0); 2134 int prev = sctx->int_signal_send; 2135 if (pid == (int)internal_getpid()) { 2136 sctx->int_signal_send = sig; 2137 } 2138 int res = REAL(kill)(pid, sig); 2139 if (pid == (int)internal_getpid()) { 2140 CHECK_EQ(sctx->int_signal_send, sig); 2141 sctx->int_signal_send = prev; 2142 } 2143 return res; 2144 } 2145 2146 TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) { 2147 SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig); 2148 ThreadSignalContext *sctx = SigCtx(thr); 2149 CHECK_NE(sctx, 0); 2150 int prev = sctx->int_signal_send; 2151 bool self = pthread_equal(tid, pthread_self()); 2152 if (self) 2153 sctx->int_signal_send = sig; 2154 int res = REAL(pthread_kill)(tid, sig); 2155 if (self) { 2156 CHECK_EQ(sctx->int_signal_send, sig); 2157 sctx->int_signal_send = prev; 2158 } 2159 return res; 2160 } 2161 2162 TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) { 2163 SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz); 2164 // It's intercepted merely to process pending signals. 2165 return REAL(gettimeofday)(tv, tz); 2166 } 2167 2168 TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service, 2169 void *hints, void *rv) { 2170 SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv); 2171 // We miss atomic synchronization in getaddrinfo, 2172 // and can report false race between malloc and free 2173 // inside of getaddrinfo. So ignore memory accesses. 
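  // The ignore bracket used just below is a per-thread counter rather than a
  // flag, so it nests and must stay balanced on every path; a small sketch of
  // the invariant (values are thr->ignore_reads_and_writes):
  //
  //   ThreadIgnoreBegin(thr, pc);  // 0 -> 1, accesses stop being recorded
  //   ThreadIgnoreBegin(thr, pc);  // 1 -> 2, e.g. from a nested region
  //   ThreadIgnoreEnd(thr);        // 2 -> 1, still ignoring
  //   ThreadIgnoreEnd(thr);        // 1 -> 0, recording resumes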
2174   ThreadIgnoreBegin(thr, pc);
2175   int res = REAL(getaddrinfo)(node, service, hints, rv);
2176   ThreadIgnoreEnd(thr);
2177   return res;
2178 }
2179 
2180 TSAN_INTERCEPTOR(int, fork, int fake) {
2181   if (in_symbolizer())
2182     return REAL(fork)(fake);
2183   SCOPED_INTERCEPTOR_RAW(fork, fake);
2184   return REAL(fork)(fake);
2185 }
2186 
2187 void atfork_prepare() {
2188   if (in_symbolizer())
2189     return;
2190   ThreadState *thr = cur_thread();
2191   const uptr pc = StackTrace::GetCurrentPc();
2192   ForkBefore(thr, pc);
2193 }
2194 
2195 void atfork_parent() {
2196   if (in_symbolizer())
2197     return;
2198   ThreadState *thr = cur_thread();
2199   const uptr pc = StackTrace::GetCurrentPc();
2200   ForkParentAfter(thr, pc);
2201 }
2202 
2203 void atfork_child() {
2204   if (in_symbolizer())
2205     return;
2206   ThreadState *thr = cur_thread();
2207   const uptr pc = StackTrace::GetCurrentPc();
2208   ForkChildAfter(thr, pc, true);
2209   FdOnFork(thr, pc);
2210 }
2211 
2212 #if !SANITIZER_IOS
2213 TSAN_INTERCEPTOR(int, vfork, int fake) {
2214   // Some programs (e.g. openjdk) call close for all file descriptors
2215   // in the child process. Under tsan this leads to false positives, because
2216   // the address space is shared, so the parent process also thinks that
2217   // the descriptors are closed (while they actually are not), and the
2218   // missed synchronization produces false reports.
2219   // Strictly speaking this is undefined behavior, because the vfork child is
2220   // not allowed to call any functions other than exec/exit. But this is what
2221   // openjdk does, so we want to handle it.
2222   // We could disable interceptors in the child process. But it's not possible
2223   // to simply intercept and wrap vfork, because the vfork child is not allowed
2224   // to return from the function that calls vfork, and that's exactly what
2225   // we would do. So this would require some assembly trickery as well.
2226   // Instead we simply turn vfork into fork.
2227   return WRAP(fork)(fake);
2228 }
2229 #endif
2230 
2231 #if SANITIZER_LINUX
2232 TSAN_INTERCEPTOR(int, clone, int (*fn)(void *), void *stack, int flags,
2233                  void *arg, int *parent_tid, void *tls, pid_t *child_tid) {
2234   SCOPED_INTERCEPTOR_RAW(clone, fn, stack, flags, arg, parent_tid, tls,
2235                          child_tid);
2236   struct Arg {
2237     int (*fn)(void *);
2238     void *arg;
2239   };
2240   auto wrapper = +[](void *p) -> int {
2241     auto *thr = cur_thread();
2242     uptr pc = GET_CURRENT_PC();
2243     // Start the background thread for fork, but not for clone.
2244     // For fork we did this always and it's known to work (or user code has
2245     // adapted). But if we do this for the new clone interceptor some code
2246     // (sandbox2) fails. So keep the model we used for years and don't start
2247     // the background thread after clone.
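    // Note the asymmetry with the pthread_atfork() path: atfork_child() above
    // calls ForkChildAfter(thr, pc, true), while this wrapper passes false so
    // that no background thread is started in the cloned child, per the
    // comment above.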
2248 ForkChildAfter(thr, pc, false); 2249 FdOnFork(thr, pc); 2250 auto *arg = static_cast<Arg *>(p); 2251 return arg->fn(arg->arg); 2252 }; 2253 ForkBefore(thr, pc); 2254 Arg arg_wrapper = {fn, arg}; 2255 int pid = REAL(clone)(wrapper, stack, flags, &arg_wrapper, parent_tid, tls, 2256 child_tid); 2257 ForkParentAfter(thr, pc); 2258 return pid; 2259 } 2260 #endif 2261 2262 #if !SANITIZER_MAC && !SANITIZER_ANDROID 2263 typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size, 2264 void *data); 2265 struct dl_iterate_phdr_data { 2266 ThreadState *thr; 2267 uptr pc; 2268 dl_iterate_phdr_cb_t cb; 2269 void *data; 2270 }; 2271 2272 static bool IsAppNotRodata(uptr addr) { 2273 return IsAppMem(addr) && *MemToShadow(addr) != Shadow::kRodata; 2274 } 2275 2276 static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size, 2277 void *data) { 2278 dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data; 2279 // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is later 2280 // accessible in dl_iterate_phdr callback. But we don't see synchronization 2281 // inside of dynamic linker, so we "unpoison" it here in order to not 2282 // produce false reports. Ignoring malloc/free in dlopen/dlclose is not enough 2283 // because some libc functions call __libc_dlopen. 2284 if (info && IsAppNotRodata((uptr)info->dlpi_name)) 2285 MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name, 2286 internal_strlen(info->dlpi_name)); 2287 int res = cbdata->cb(info, size, cbdata->data); 2288 // Perform the check one more time in case info->dlpi_name was overwritten 2289 // by user callback. 2290 if (info && IsAppNotRodata((uptr)info->dlpi_name)) 2291 MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name, 2292 internal_strlen(info->dlpi_name)); 2293 return res; 2294 } 2295 2296 TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) { 2297 SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data); 2298 dl_iterate_phdr_data cbdata; 2299 cbdata.thr = thr; 2300 cbdata.pc = pc; 2301 cbdata.cb = cb; 2302 cbdata.data = data; 2303 int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata); 2304 return res; 2305 } 2306 #endif 2307 2308 static int OnExit(ThreadState *thr) { 2309 int status = Finalize(thr); 2310 FlushStreams(); 2311 return status; 2312 } 2313 2314 struct TsanInterceptorContext { 2315 ThreadState *thr; 2316 const uptr pc; 2317 }; 2318 2319 #if !SANITIZER_MAC 2320 static void HandleRecvmsg(ThreadState *thr, uptr pc, 2321 __sanitizer_msghdr *msg) { 2322 int fds[64]; 2323 int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds)); 2324 for (int i = 0; i < cnt; i++) 2325 FdEventCreate(thr, pc, fds[i]); 2326 } 2327 #endif 2328 2329 #include "sanitizer_common/sanitizer_platform_interceptors.h" 2330 // Causes interceptor recursion (getaddrinfo() and fopen()) 2331 #undef SANITIZER_INTERCEPT_GETADDRINFO 2332 // We define our own. 
2333 #if SANITIZER_INTERCEPT_TLS_GET_ADDR 2334 #define NEED_TLS_GET_ADDR 2335 #endif 2336 #undef SANITIZER_INTERCEPT_TLS_GET_ADDR 2337 #define SANITIZER_INTERCEPT_TLS_GET_OFFSET 1 2338 #undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK 2339 2340 #define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name) 2341 #define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \ 2342 INTERCEPT_FUNCTION_VER(name, ver) 2343 #define COMMON_INTERCEPT_FUNCTION_VER_UNVERSIONED_FALLBACK(name, ver) \ 2344 (INTERCEPT_FUNCTION_VER(name, ver) || INTERCEPT_FUNCTION(name)) 2345 2346 #define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \ 2347 MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \ 2348 ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \ 2349 true) 2350 2351 #define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \ 2352 MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr, \ 2353 ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \ 2354 false) 2355 2356 #define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \ 2357 SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__); \ 2358 TsanInterceptorContext _ctx = {thr, pc}; \ 2359 ctx = (void *)&_ctx; \ 2360 (void)ctx; 2361 2362 #define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \ 2363 SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \ 2364 TsanInterceptorContext _ctx = {thr, pc}; \ 2365 ctx = (void *)&_ctx; \ 2366 (void)ctx; 2367 2368 #define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \ 2369 if (path) \ 2370 Acquire(thr, pc, File2addr(path)); \ 2371 if (file) { \ 2372 int fd = fileno_unlocked(file); \ 2373 if (fd >= 0) FdFileCreate(thr, pc, fd); \ 2374 } 2375 2376 #define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \ 2377 if (file) { \ 2378 int fd = fileno_unlocked(file); \ 2379 FdClose(thr, pc, fd); \ 2380 } 2381 2382 #define COMMON_INTERCEPTOR_DLOPEN(filename, flag) \ 2383 ({ \ 2384 CheckNoDeepBind(filename, flag); \ 2385 ThreadIgnoreBegin(thr, 0); \ 2386 void *res = REAL(dlopen)(filename, flag); \ 2387 ThreadIgnoreEnd(thr); \ 2388 res; \ 2389 }) 2390 2391 #define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \ 2392 libignore()->OnLibraryLoaded(filename) 2393 2394 #define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \ 2395 libignore()->OnLibraryUnloaded() 2396 2397 #define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \ 2398 Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u) 2399 2400 #define COMMON_INTERCEPTOR_RELEASE(ctx, u) \ 2401 Release(((TsanInterceptorContext *) ctx)->thr, pc, u) 2402 2403 #define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \ 2404 Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path)) 2405 2406 #define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \ 2407 FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd) 2408 2409 #define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \ 2410 FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd) 2411 2412 #define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \ 2413 FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd) 2414 2415 #define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \ 2416 FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd) 2417 2418 #define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \ 2419 ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name) 2420 2421 #define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \ 2422 if (pthread_equal(pthread_self(), reinterpret_cast<void *>(thread))) \ 2423 COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name); \ 2424 else \ 2425 __tsan::ctx->thread_registry.SetThreadNameByUserId(thread, name) 2426 2427 #define COMMON_INTERCEPTOR_BLOCK_REAL(name) 
BLOCK_REAL(name) 2428 2429 #define COMMON_INTERCEPTOR_ON_EXIT(ctx) \ 2430 OnExit(((TsanInterceptorContext *) ctx)->thr) 2431 2432 #define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) \ 2433 MutexPreLock(((TsanInterceptorContext *)ctx)->thr, \ 2434 ((TsanInterceptorContext *)ctx)->pc, (uptr)m) 2435 2436 #define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) \ 2437 MutexPostLock(((TsanInterceptorContext *)ctx)->thr, \ 2438 ((TsanInterceptorContext *)ctx)->pc, (uptr)m) 2439 2440 #define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \ 2441 MutexUnlock(((TsanInterceptorContext *)ctx)->thr, \ 2442 ((TsanInterceptorContext *)ctx)->pc, (uptr)m) 2443 2444 #define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) \ 2445 MutexRepair(((TsanInterceptorContext *)ctx)->thr, \ 2446 ((TsanInterceptorContext *)ctx)->pc, (uptr)m) 2447 2448 #define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) \ 2449 MutexInvalidAccess(((TsanInterceptorContext *)ctx)->thr, \ 2450 ((TsanInterceptorContext *)ctx)->pc, (uptr)m) 2451 2452 #define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \ 2453 off) \ 2454 do { \ 2455 return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd, \ 2456 off); \ 2457 } while (false) 2458 2459 #if !SANITIZER_MAC 2460 #define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \ 2461 HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \ 2462 ((TsanInterceptorContext *)ctx)->pc, msg) 2463 #endif 2464 2465 #define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \ 2466 if (TsanThread *t = GetCurrentThread()) { \ 2467 *begin = t->tls_begin(); \ 2468 *end = t->tls_end(); \ 2469 } else { \ 2470 *begin = *end = 0; \ 2471 } 2472 2473 #define COMMON_INTERCEPTOR_USER_CALLBACK_START() \ 2474 SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START() 2475 2476 #define COMMON_INTERCEPTOR_USER_CALLBACK_END() \ 2477 SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END() 2478 2479 #include "sanitizer_common/sanitizer_common_interceptors.inc" 2480 2481 static int sigaction_impl(int sig, const __sanitizer_sigaction *act, 2482 __sanitizer_sigaction *old); 2483 static __sanitizer_sighandler_ptr signal_impl(int sig, 2484 __sanitizer_sighandler_ptr h); 2485 2486 #define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signo, act, oldact) \ 2487 { return sigaction_impl(signo, act, oldact); } 2488 2489 #define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \ 2490 { return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); } 2491 2492 #include "sanitizer_common/sanitizer_signal_interceptors.inc" 2493 2494 int sigaction_impl(int sig, const __sanitizer_sigaction *act, 2495 __sanitizer_sigaction *old) { 2496 // Note: if we call REAL(sigaction) directly for any reason without proxying 2497 // the signal handler through sighandler, very bad things will happen. 2498 // The handler will run synchronously and corrupt tsan per-thread state. 2499 SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old); 2500 if (sig <= 0 || sig >= kSigCount) { 2501 errno = errno_EINVAL; 2502 return -1; 2503 } 2504 __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions; 2505 __sanitizer_sigaction old_stored; 2506 if (old) internal_memcpy(&old_stored, &sigactions[sig], sizeof(old_stored)); 2507 __sanitizer_sigaction newact; 2508 if (act) { 2509 // Copy act into sigactions[sig]. 2510 // Can't use struct copy, because compiler can emit call to memcpy. 2511 // Can't use internal_memcpy, because it copies byte-by-byte, 2512 // and signal handler reads the handler concurrently. It it can read 2513 // some bytes from old value and some bytes from new value. 
2514 // Use volatile to prevent insertion of memcpy. 2515 sigactions[sig].handler = 2516 *(volatile __sanitizer_sighandler_ptr const *)&act->handler; 2517 sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags; 2518 internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask, 2519 sizeof(sigactions[sig].sa_mask)); 2520 #if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD 2521 sigactions[sig].sa_restorer = act->sa_restorer; 2522 #endif 2523 internal_memcpy(&newact, act, sizeof(newact)); 2524 internal_sigfillset(&newact.sa_mask); 2525 if ((act->sa_flags & SA_SIGINFO) || 2526 ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl)) { 2527 newact.sa_flags |= SA_SIGINFO; 2528 newact.sigaction = sighandler; 2529 } 2530 ReleaseStore(thr, pc, (uptr)&sigactions[sig]); 2531 act = &newact; 2532 } 2533 int res = REAL(sigaction)(sig, act, old); 2534 if (res == 0 && old && old->sigaction == sighandler) 2535 internal_memcpy(old, &old_stored, sizeof(*old)); 2536 return res; 2537 } 2538 2539 static __sanitizer_sighandler_ptr signal_impl(int sig, 2540 __sanitizer_sighandler_ptr h) { 2541 __sanitizer_sigaction act; 2542 act.handler = h; 2543 internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask)); 2544 act.sa_flags = 0; 2545 __sanitizer_sigaction old; 2546 int res = sigaction_symname(sig, &act, &old); 2547 if (res) return (__sanitizer_sighandler_ptr)sig_err; 2548 return old.handler; 2549 } 2550 2551 #define TSAN_SYSCALL() \ 2552 ThreadState *thr = cur_thread(); \ 2553 if (thr->ignore_interceptors) \ 2554 return; \ 2555 ScopedSyscall scoped_syscall(thr) 2556 2557 struct ScopedSyscall { 2558 ThreadState *thr; 2559 2560 explicit ScopedSyscall(ThreadState *thr) : thr(thr) { LazyInitialize(thr); } 2561 2562 ~ScopedSyscall() { 2563 ProcessPendingSignals(thr); 2564 } 2565 }; 2566 2567 #if !SANITIZER_FREEBSD && !SANITIZER_MAC 2568 static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) { 2569 TSAN_SYSCALL(); 2570 MemoryAccessRange(thr, pc, p, s, write); 2571 } 2572 2573 static USED void syscall_acquire(uptr pc, uptr addr) { 2574 TSAN_SYSCALL(); 2575 Acquire(thr, pc, addr); 2576 DPrintf("syscall_acquire(0x%zx))\n", addr); 2577 } 2578 2579 static USED void syscall_release(uptr pc, uptr addr) { 2580 TSAN_SYSCALL(); 2581 DPrintf("syscall_release(0x%zx)\n", addr); 2582 Release(thr, pc, addr); 2583 } 2584 2585 static void syscall_fd_close(uptr pc, int fd) { 2586 auto *thr = cur_thread(); 2587 FdClose(thr, pc, fd); 2588 } 2589 2590 static USED void syscall_fd_acquire(uptr pc, int fd) { 2591 TSAN_SYSCALL(); 2592 FdAcquire(thr, pc, fd); 2593 DPrintf("syscall_fd_acquire(%d)\n", fd); 2594 } 2595 2596 static USED void syscall_fd_release(uptr pc, int fd) { 2597 TSAN_SYSCALL(); 2598 DPrintf("syscall_fd_release(%d)\n", fd); 2599 FdRelease(thr, pc, fd); 2600 } 2601 2602 static void syscall_pre_fork(uptr pc) { ForkBefore(cur_thread(), pc); } 2603 2604 static void syscall_post_fork(uptr pc, int pid) { 2605 ThreadState *thr = cur_thread(); 2606 if (pid == 0) { 2607 // child 2608 ForkChildAfter(thr, pc, true); 2609 FdOnFork(thr, pc); 2610 } else if (pid > 0) { 2611 // parent 2612 ForkParentAfter(thr, pc); 2613 } else { 2614 // error 2615 ForkParentAfter(thr, pc); 2616 } 2617 } 2618 #endif 2619 2620 #define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \ 2621 syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false) 2622 2623 #define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \ 2624 syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true) 2625 2626 #define COMMON_SYSCALL_POST_READ_RANGE(p, s) \ 
2627 do { \ 2628 (void)(p); \ 2629 (void)(s); \ 2630 } while (false) 2631 2632 #define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \ 2633 do { \ 2634 (void)(p); \ 2635 (void)(s); \ 2636 } while (false) 2637 2638 #define COMMON_SYSCALL_ACQUIRE(addr) \ 2639 syscall_acquire(GET_CALLER_PC(), (uptr)(addr)) 2640 2641 #define COMMON_SYSCALL_RELEASE(addr) \ 2642 syscall_release(GET_CALLER_PC(), (uptr)(addr)) 2643 2644 #define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd) 2645 2646 #define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd) 2647 2648 #define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd) 2649 2650 #define COMMON_SYSCALL_PRE_FORK() \ 2651 syscall_pre_fork(GET_CALLER_PC()) 2652 2653 #define COMMON_SYSCALL_POST_FORK(res) \ 2654 syscall_post_fork(GET_CALLER_PC(), res) 2655 2656 #include "sanitizer_common/sanitizer_common_syscalls.inc" 2657 #include "sanitizer_common/sanitizer_syscalls_netbsd.inc" 2658 2659 #ifdef NEED_TLS_GET_ADDR 2660 2661 static void handle_tls_addr(void *arg, void *res) { 2662 ThreadState *thr = cur_thread(); 2663 if (!thr) 2664 return; 2665 DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr, 2666 thr->tls_addr + thr->tls_size); 2667 if (!dtv) 2668 return; 2669 // New DTLS block has been allocated. 2670 MemoryResetRange(thr, 0, dtv->beg, dtv->size); 2671 } 2672 2673 #if !SANITIZER_S390 2674 // Define own interceptor instead of sanitizer_common's for three reasons: 2675 // 1. It must not process pending signals. 2676 // Signal handlers may contain MOVDQA instruction (see below). 2677 // 2. It must be as simple as possible to not contain MOVDQA. 2678 // 3. Sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE which 2679 // is empty for tsan (meant only for msan). 2680 // Note: __tls_get_addr can be called with mis-aligned stack due to: 2681 // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066 2682 // So the interceptor must work with mis-aligned stack, in particular, does not 2683 // execute MOVDQA with stack addresses. 
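// For context, a sketch of what the compiler emits for dynamic TLS in the
// general-dynamic model (the declarations follow the ELF TLS ABI and are
// shown for illustration only; this file does not define them):
//
//   struct tls_index { uptr ti_module; uptr ti_offset; };
//   extern "C" void *__tls_get_addr(tls_index *ti);
//   // an access to a __thread variable in a dlopen'ed module becomes
//   //   int *p = (int *)__tls_get_addr(&index_of_that_variable);
//
// The first such access from a thread can make the dynamic linker allocate a
// fresh DTLS block out of ordinary heap memory; DTLS_on_tls_get_addr()
// detects that case, and handle_tls_addr() below resets the shadow of the new
// range so that whatever the recycled memory previously held cannot produce
// false reports.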
2684 TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) { 2685 void *res = REAL(__tls_get_addr)(arg); 2686 handle_tls_addr(arg, res); 2687 return res; 2688 } 2689 #else // SANITIZER_S390 2690 TSAN_INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) { 2691 uptr res = __tls_get_offset_wrapper(arg, REAL(__tls_get_offset)); 2692 char *tp = static_cast<char *>(__builtin_thread_pointer()); 2693 handle_tls_addr(arg, res + tp); 2694 return res; 2695 } 2696 #endif 2697 #endif 2698 2699 #if SANITIZER_NETBSD 2700 TSAN_INTERCEPTOR(void, _lwp_exit) { 2701 SCOPED_TSAN_INTERCEPTOR(_lwp_exit); 2702 DestroyThreadState(); 2703 REAL(_lwp_exit)(); 2704 } 2705 #define TSAN_MAYBE_INTERCEPT__LWP_EXIT TSAN_INTERCEPT(_lwp_exit) 2706 #else 2707 #define TSAN_MAYBE_INTERCEPT__LWP_EXIT 2708 #endif 2709 2710 #if SANITIZER_FREEBSD 2711 TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) { 2712 SCOPED_TSAN_INTERCEPTOR(thr_exit, state); 2713 DestroyThreadState(); 2714 REAL(thr_exit(state)); 2715 } 2716 #define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit) 2717 #else 2718 #define TSAN_MAYBE_INTERCEPT_THR_EXIT 2719 #endif 2720 2721 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_init, void *c, void *a) 2722 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_destroy, void *c) 2723 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_signal, void *c) 2724 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_broadcast, void *c) 2725 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, cond_wait, void *c, void *m) 2726 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_init, void *m, void *a) 2727 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_destroy, void *m) 2728 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_lock, void *m) 2729 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_trylock, void *m) 2730 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, mutex_unlock, void *m) 2731 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_init, void *l, void *a) 2732 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_destroy, void *l) 2733 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_rdlock, void *l) 2734 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_tryrdlock, void *l) 2735 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_wrlock, void *l) 2736 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_trywrlock, void *l) 2737 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, rwlock_unlock, void *l) 2738 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, once, void *o, void (*i)()) 2739 TSAN_INTERCEPTOR_FREEBSD_ALIAS(int, sigmask, int f, void *n, void *o) 2740 2741 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a) 2742 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c) 2743 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c) 2744 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m) 2745 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c) 2746 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a) 2747 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m) 2748 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m) 2749 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a) 2750 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m) 2751 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m) 2752 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m) 2753 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m) 2754 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m) 2755 TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m) 2756 TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)()) 2757 TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(int, sigsetmask, sigmask, int a, void *b, 2758 void *c) 
2759 2760 namespace __tsan { 2761 2762 static void finalize(void *arg) { 2763 ThreadState *thr = cur_thread(); 2764 int status = Finalize(thr); 2765 // Make sure the output is not lost. 2766 FlushStreams(); 2767 if (status) 2768 Die(); 2769 } 2770 2771 #if !SANITIZER_MAC && !SANITIZER_ANDROID 2772 static void unreachable() { 2773 Report("FATAL: ThreadSanitizer: unreachable called\n"); 2774 Die(); 2775 } 2776 #endif 2777 2778 // Define default implementation since interception of libdispatch is optional. 2779 SANITIZER_WEAK_ATTRIBUTE void InitializeLibdispatchInterceptors() {} 2780 2781 void InitializeInterceptors() { 2782 #if !SANITIZER_MAC 2783 // We need to setup it early, because functions like dlsym() can call it. 2784 REAL(memset) = internal_memset; 2785 REAL(memcpy) = internal_memcpy; 2786 #endif 2787 2788 new(interceptor_ctx()) InterceptorContext(); 2789 2790 InitializeCommonInterceptors(); 2791 InitializeSignalInterceptors(); 2792 InitializeLibdispatchInterceptors(); 2793 2794 #if !SANITIZER_MAC 2795 // We can not use TSAN_INTERCEPT to get setjmp addr, 2796 // because it does &setjmp and setjmp is not present in some versions of libc. 2797 using __interception::InterceptFunction; 2798 InterceptFunction(TSAN_STRING_SETJMP, (uptr*)&REAL(setjmp_symname), 0, 0); 2799 InterceptFunction("_setjmp", (uptr*)&REAL(_setjmp), 0, 0); 2800 InterceptFunction(TSAN_STRING_SIGSETJMP, (uptr*)&REAL(sigsetjmp_symname), 0, 2801 0); 2802 #if !SANITIZER_NETBSD 2803 InterceptFunction("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0); 2804 #endif 2805 #endif 2806 2807 TSAN_INTERCEPT(longjmp_symname); 2808 TSAN_INTERCEPT(siglongjmp_symname); 2809 #if SANITIZER_NETBSD 2810 TSAN_INTERCEPT(_longjmp); 2811 #endif 2812 2813 TSAN_INTERCEPT(malloc); 2814 TSAN_INTERCEPT(__libc_memalign); 2815 TSAN_INTERCEPT(calloc); 2816 TSAN_INTERCEPT(realloc); 2817 TSAN_INTERCEPT(reallocarray); 2818 TSAN_INTERCEPT(free); 2819 TSAN_INTERCEPT(cfree); 2820 TSAN_INTERCEPT(munmap); 2821 TSAN_MAYBE_INTERCEPT_MEMALIGN; 2822 TSAN_INTERCEPT(valloc); 2823 TSAN_MAYBE_INTERCEPT_PVALLOC; 2824 TSAN_INTERCEPT(posix_memalign); 2825 2826 TSAN_INTERCEPT(strcpy); 2827 TSAN_INTERCEPT(strncpy); 2828 TSAN_INTERCEPT(strdup); 2829 2830 TSAN_INTERCEPT(pthread_create); 2831 TSAN_INTERCEPT(pthread_join); 2832 TSAN_INTERCEPT(pthread_detach); 2833 TSAN_INTERCEPT(pthread_exit); 2834 #if SANITIZER_LINUX 2835 TSAN_INTERCEPT(pthread_tryjoin_np); 2836 TSAN_INTERCEPT(pthread_timedjoin_np); 2837 #endif 2838 2839 TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE); 2840 TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE); 2841 TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE); 2842 TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE); 2843 TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE); 2844 TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE); 2845 2846 TSAN_MAYBE_PTHREAD_COND_CLOCKWAIT; 2847 2848 TSAN_INTERCEPT(pthread_mutex_init); 2849 TSAN_INTERCEPT(pthread_mutex_destroy); 2850 TSAN_INTERCEPT(pthread_mutex_trylock); 2851 TSAN_INTERCEPT(pthread_mutex_timedlock); 2852 2853 TSAN_INTERCEPT(pthread_spin_init); 2854 TSAN_INTERCEPT(pthread_spin_destroy); 2855 TSAN_INTERCEPT(pthread_spin_lock); 2856 TSAN_INTERCEPT(pthread_spin_trylock); 2857 TSAN_INTERCEPT(pthread_spin_unlock); 2858 2859 TSAN_INTERCEPT(pthread_rwlock_init); 2860 TSAN_INTERCEPT(pthread_rwlock_destroy); 2861 TSAN_INTERCEPT(pthread_rwlock_rdlock); 2862 TSAN_INTERCEPT(pthread_rwlock_tryrdlock); 2863 TSAN_INTERCEPT(pthread_rwlock_timedrdlock); 2864 
TSAN_INTERCEPT(pthread_rwlock_wrlock); 2865 TSAN_INTERCEPT(pthread_rwlock_trywrlock); 2866 TSAN_INTERCEPT(pthread_rwlock_timedwrlock); 2867 TSAN_INTERCEPT(pthread_rwlock_unlock); 2868 2869 TSAN_INTERCEPT(pthread_barrier_init); 2870 TSAN_INTERCEPT(pthread_barrier_destroy); 2871 TSAN_INTERCEPT(pthread_barrier_wait); 2872 2873 TSAN_INTERCEPT(pthread_once); 2874 2875 TSAN_INTERCEPT(fstat); 2876 TSAN_MAYBE_INTERCEPT___FXSTAT; 2877 TSAN_MAYBE_INTERCEPT_FSTAT64; 2878 TSAN_MAYBE_INTERCEPT___FXSTAT64; 2879 TSAN_INTERCEPT(open); 2880 TSAN_MAYBE_INTERCEPT_OPEN64; 2881 TSAN_INTERCEPT(creat); 2882 TSAN_MAYBE_INTERCEPT_CREAT64; 2883 TSAN_INTERCEPT(dup); 2884 TSAN_INTERCEPT(dup2); 2885 TSAN_INTERCEPT(dup3); 2886 TSAN_MAYBE_INTERCEPT_EVENTFD; 2887 TSAN_MAYBE_INTERCEPT_SIGNALFD; 2888 TSAN_MAYBE_INTERCEPT_INOTIFY_INIT; 2889 TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1; 2890 TSAN_INTERCEPT(socket); 2891 TSAN_INTERCEPT(socketpair); 2892 TSAN_INTERCEPT(connect); 2893 TSAN_INTERCEPT(bind); 2894 TSAN_INTERCEPT(listen); 2895 TSAN_MAYBE_INTERCEPT_EPOLL; 2896 TSAN_INTERCEPT(close); 2897 TSAN_MAYBE_INTERCEPT___CLOSE; 2898 TSAN_MAYBE_INTERCEPT___RES_ICLOSE; 2899 TSAN_INTERCEPT(pipe); 2900 TSAN_INTERCEPT(pipe2); 2901 2902 TSAN_INTERCEPT(unlink); 2903 TSAN_INTERCEPT(tmpfile); 2904 TSAN_MAYBE_INTERCEPT_TMPFILE64; 2905 TSAN_INTERCEPT(abort); 2906 TSAN_INTERCEPT(rmdir); 2907 TSAN_INTERCEPT(closedir); 2908 2909 TSAN_INTERCEPT(sigsuspend); 2910 TSAN_INTERCEPT(sigblock); 2911 TSAN_INTERCEPT(sigsetmask); 2912 TSAN_INTERCEPT(pthread_sigmask); 2913 TSAN_INTERCEPT(raise); 2914 TSAN_INTERCEPT(kill); 2915 TSAN_INTERCEPT(pthread_kill); 2916 TSAN_INTERCEPT(sleep); 2917 TSAN_INTERCEPT(usleep); 2918 TSAN_INTERCEPT(nanosleep); 2919 TSAN_INTERCEPT(pause); 2920 TSAN_INTERCEPT(gettimeofday); 2921 TSAN_INTERCEPT(getaddrinfo); 2922 2923 TSAN_INTERCEPT(fork); 2924 TSAN_INTERCEPT(vfork); 2925 #if SANITIZER_LINUX 2926 TSAN_INTERCEPT(clone); 2927 #endif 2928 #if !SANITIZER_ANDROID 2929 TSAN_INTERCEPT(dl_iterate_phdr); 2930 #endif 2931 TSAN_MAYBE_INTERCEPT_ON_EXIT; 2932 TSAN_INTERCEPT(__cxa_atexit); 2933 TSAN_INTERCEPT(_exit); 2934 2935 #ifdef NEED_TLS_GET_ADDR 2936 #if !SANITIZER_S390 2937 TSAN_INTERCEPT(__tls_get_addr); 2938 #else 2939 TSAN_INTERCEPT(__tls_get_addr_internal); 2940 TSAN_INTERCEPT(__tls_get_offset); 2941 #endif 2942 #endif 2943 2944 TSAN_MAYBE_INTERCEPT__LWP_EXIT; 2945 TSAN_MAYBE_INTERCEPT_THR_EXIT; 2946 2947 #if !SANITIZER_MAC && !SANITIZER_ANDROID 2948 // Need to setup it, because interceptors check that the function is resolved. 2949 // But atexit is emitted directly into the module, so can't be resolved. 
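  // For context: on glibc targets atexit() itself is a small function linked
  // statically into every module; roughly (a sketch, not the exact glibc
  // source):
  //
  //   int atexit(void (*func)(void)) {
  //     return __cxa_atexit((void (*)(void *))func, 0, __dso_handle);
  //   }
  //
  // so there is no dynamic "atexit" symbol for the interception machinery to
  // resolve. The real work is done by the __cxa_atexit interceptor above;
  // REAL(atexit) is pointed at unreachable() only to satisfy the "is this
  // interceptor resolved" check and is never expected to be called.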
2950 REAL(atexit) = (int(*)(void(*)()))unreachable; 2951 #endif 2952 2953 if (REAL(__cxa_atexit)(&finalize, 0, 0)) { 2954 Printf("ThreadSanitizer: failed to setup atexit callback\n"); 2955 Die(); 2956 } 2957 if (pthread_atfork(atfork_prepare, atfork_parent, atfork_child)) { 2958 Printf("ThreadSanitizer: failed to setup atfork callbacks\n"); 2959 Die(); 2960 } 2961 2962 #if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD 2963 if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) { 2964 Printf("ThreadSanitizer: failed to create thread key\n"); 2965 Die(); 2966 } 2967 #endif 2968 2969 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_init); 2970 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_destroy); 2971 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_signal); 2972 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_broadcast); 2973 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(cond_wait); 2974 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_init); 2975 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_destroy); 2976 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_lock); 2977 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_trylock); 2978 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(mutex_unlock); 2979 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_init); 2980 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_destroy); 2981 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_rdlock); 2982 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_tryrdlock); 2983 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_wrlock); 2984 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_trywrlock); 2985 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(rwlock_unlock); 2986 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(once); 2987 TSAN_MAYBE_INTERCEPT_FREEBSD_ALIAS(sigmask); 2988 2989 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init); 2990 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal); 2991 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast); 2992 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_wait); 2993 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_destroy); 2994 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_init); 2995 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_destroy); 2996 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_trylock); 2997 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_init); 2998 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_destroy); 2999 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_rdlock); 3000 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_tryrdlock); 3001 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_wrlock); 3002 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_trywrlock); 3003 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_unlock); 3004 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(once); 3005 TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(sigsetmask); 3006 3007 FdInit(); 3008 } 3009 3010 } // namespace __tsan 3011 3012 // Invisible barrier for tests. 3013 // There were several unsuccessful iterations for this functionality: 3014 // 1. Initially it was implemented in user code using 3015 // REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on 3016 // MacOS. Futexes are linux-specific for this matter. 3017 // 2. Then we switched to atomics+usleep(10). But usleep produced parasitic 3018 // "as-if synchronized via sleep" messages in reports which failed some 3019 // output tests. 3020 // 3. Then we switched to atomics+sched_yield. But this produced tons of tsan- 3021 // visible events, which lead to "failed to restore stack trace" failures. 3022 // Note that no_sanitize_thread attribute does not turn off atomic interception 3023 // so attaching it to the function defined in user code does not help. 3024 // That's why we now have what we have. 
3025 constexpr u32 kBarrierThreadBits = 10;
3026 constexpr u32 kBarrierThreads = 1 << kBarrierThreadBits;
3027 
3028 extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_init(
3029     atomic_uint32_t *barrier, u32 num_threads) {
3030   if (num_threads >= kBarrierThreads) {
3031     Printf("barrier_init: count is too large (%d)\n", num_threads);
3032     Die();
3033   }
3034   // The kBarrierThreadBits least significant bits hold the thread count;
3035   // the remaining bits count the threads that have already entered.
3036   atomic_store(barrier, num_threads, memory_order_relaxed);
3037 }
3038 
3039 static u32 barrier_epoch(u32 value) {
3040   return (value >> kBarrierThreadBits) / (value & (kBarrierThreads - 1));
3041 }
3042 
3043 extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __tsan_testonly_barrier_wait(
3044     atomic_uint32_t *barrier) {
3045   u32 old = atomic_fetch_add(barrier, kBarrierThreads, memory_order_relaxed);
3046   u32 old_epoch = barrier_epoch(old);
3047   if (barrier_epoch(old + kBarrierThreads) != old_epoch) {
3048     FutexWake(barrier, (1 << 30));
3049     return;
3050   }
3051   for (;;) {
3052     u32 cur = atomic_load(barrier, memory_order_relaxed);
3053     if (barrier_epoch(cur) != old_epoch)
3054       return;
3055     FutexWait(barrier, cur);
3056   }
3057 }
3058 
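// A usage sketch from a test's point of view (the declarations are the test's
// own; a plain 32-bit unsigned is assumed for the storage, since the runtime
// above only ever touches one u32):
//
//   extern "C" void __tsan_testonly_barrier_init(unsigned *b, unsigned count);
//   extern "C" void __tsan_testonly_barrier_wait(unsigned *b);
//   static unsigned barrier;
//   // main thread:  __tsan_testonly_barrier_init(&barrier, 2);
//   // each worker:  __tsan_testonly_barrier_wait(&barrier);
//
// Worked example of the encoding for count == 2 (kBarrierThreads == 1024):
//   init:           value == 2                 (0 entered, epoch 0)
//   first waiter:   old == 2,    value -> 1026 (1 entered, epoch 1/2 == 0)
//                   epoch unchanged, so it blocks in FutexWait
//   second waiter:  old == 1026, value -> 2050 (2 entered, epoch 2/2 == 1)
//                   epoch advanced, so it wakes all waiters via FutexWake
// Waiters re-check the epoch after every FutexWait return, which makes
// spurious futex wakeups harmless.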