//===-- tsan_platform_linux.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Linux- and BSD-specific code.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "tsan_flags.h"
#include "tsan_platform.h"
#include "tsan_rtl.h"

#include <fcntl.h>
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <sys/mman.h>
#if SANITIZER_LINUX
#include <sys/personality.h>
#include <setjmp.h>
#endif
#include <sys/syscall.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <unistd.h>
#include <sched.h>
#include <dlfcn.h>
#if SANITIZER_LINUX
#define __need_res_state
#include <resolv.h>
#endif

#ifdef sa_handler
# undef sa_handler
#endif

#ifdef sa_sigaction
# undef sa_sigaction
#endif

#if SANITIZER_FREEBSD
extern "C" void *__libc_stack_end;
void *__libc_stack_end = 0;
#endif

#if SANITIZER_LINUX && (defined(__aarch64__) || defined(__loongarch_lp64)) && \
    !SANITIZER_GO
# define INIT_LONGJMP_XOR_KEY 1
#else
# define INIT_LONGJMP_XOR_KEY 0
#endif

#if INIT_LONGJMP_XOR_KEY
#include "interception/interception.h"
// Must be declared outside of other namespaces.
DECLARE_REAL(int, _setjmp, void *env)
#endif

namespace __tsan {

#if INIT_LONGJMP_XOR_KEY
static void InitializeLongjmpXorKey();
static uptr longjmp_xor_key;
#endif

// Runtime detected VMA size.
uptr vmaSize;

enum {
  MemTotal,
  MemShadow,
  MemMeta,
  MemFile,
  MemMmap,
  MemHeap,
  MemOther,
  MemCount,
};

void FillProfileCallback(uptr p, uptr rss, bool file, uptr *mem) {
  mem[MemTotal] += rss;
  if (p >= ShadowBeg() && p < ShadowEnd())
    mem[MemShadow] += rss;
  else if (p >= MetaShadowBeg() && p < MetaShadowEnd())
    mem[MemMeta] += rss;
  else if ((p >= LoAppMemBeg() && p < LoAppMemEnd()) ||
           (p >= MidAppMemBeg() && p < MidAppMemEnd()) ||
           (p >= HiAppMemBeg() && p < HiAppMemEnd()))
    mem[file ? MemFile : MemMmap] += rss;
  else if (p >= HeapMemBeg() && p < HeapMemEnd())
    mem[MemHeap] += rss;
  else
    mem[MemOther] += rss;
}

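// Formats a one-line memory profile into buf. Memory known to be backed by
// internal allocations (metadata blocks, sync objects, trace parts, the
// stack depot) is subtracted from the generic mmap bucket below so that it
// is not counted twice.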
void WriteMemoryProfile(char *buf, uptr buf_size, u64 uptime_ns) {
  uptr mem[MemCount];
  internal_memset(mem, 0, sizeof(mem));
  GetMemoryProfile(FillProfileCallback, mem);
  auto meta = ctx->metamap.GetMemoryStats();
  StackDepotStats stacks = StackDepotGetStats();
  uptr nthread, nlive;
  ctx->thread_registry.GetNumberOfThreads(&nthread, &nlive);
  uptr trace_mem;
  {
    Lock l(&ctx->slot_mtx);
    trace_mem = ctx->trace_part_total_allocated * sizeof(TracePart);
  }
  uptr internal_stats[AllocatorStatCount];
  internal_allocator()->GetStats(internal_stats);
  // All these are allocated from the common mmap region.
  mem[MemMmap] -= meta.mem_block + meta.sync_obj + trace_mem +
                  stacks.allocated + internal_stats[AllocatorStatMapped];
  if (s64(mem[MemMmap]) < 0)
    mem[MemMmap] = 0;
  internal_snprintf(
      buf, buf_size,
      "==%zu== %llus [%zu]: RSS %zd MB: shadow:%zd meta:%zd file:%zd"
      " mmap:%zd heap:%zd other:%zd intalloc:%zd memblocks:%zd syncobj:%zu"
      " trace:%zu stacks=%zd threads=%zu/%zu\n",
      internal_getpid(), uptime_ns / (1000 * 1000 * 1000), ctx->global_epoch,
      mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
      mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemHeap] >> 20,
      mem[MemOther] >> 20, internal_stats[AllocatorStatMapped] >> 20,
      meta.mem_block >> 20, meta.sync_obj >> 20, trace_mem >> 20,
      stacks.allocated >> 20, nlive, nthread);
}

#if !SANITIZER_GO
// Mark shadow for .rodata sections with the special Shadow::kRodata marker.
// Accesses to .rodata can't race, so this saves time, memory and trace space.
static NOINLINE void MapRodata(char* buffer, uptr size) {
  // First create temp file.
  const char *tmpdir = GetEnv("TMPDIR");
  if (tmpdir == 0)
    tmpdir = GetEnv("TEST_TMPDIR");
#ifdef P_tmpdir
  if (tmpdir == 0)
    tmpdir = P_tmpdir;
#endif
  if (tmpdir == 0)
    return;
  internal_snprintf(buffer, size, "%s/tsan.rodata.%d",
                    tmpdir, (int)internal_getpid());
  uptr openrv = internal_open(buffer, O_RDWR | O_CREAT | O_EXCL, 0600);
  if (internal_iserror(openrv))
    return;
  internal_unlink(buffer);  // Unlink it now, so that we can reuse the buffer.
  fd_t fd = openrv;
  // Fill the file with Shadow::kRodata.
  const uptr kMarkerSize = 512 * 1024 / sizeof(RawShadow);
  InternalMmapVector<RawShadow> marker(kMarkerSize);
  // Volatile to prevent the compiler from replacing the loop with memset.
  for (volatile RawShadow *p = marker.data(); p < marker.data() + kMarkerSize;
       p++)
    *p = Shadow::kRodata;
  internal_write(fd, marker.data(), marker.size() * sizeof(RawShadow));
  // Map the file into memory.
  uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
  if (internal_iserror(page)) {
    internal_close(fd);
    return;
  }
  // Map the file into shadow of .rodata sections.
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  // Reusing the buffer 'buffer'.
  MemoryMappedSegment segment(buffer, size);
  while (proc_maps.Next(&segment)) {
    if (segment.filename[0] != 0 && segment.filename[0] != '[' &&
        segment.IsReadable() && segment.IsExecutable() &&
        !segment.IsWritable() && IsAppMem(segment.start)) {
      // Assume it's .rodata
      char *shadow_start = (char *)MemToShadow(segment.start);
      char *shadow_end = (char *)MemToShadow(segment.end);
      for (char *p = shadow_start; p < shadow_end;
           p += marker.size() * sizeof(RawShadow)) {
        internal_mmap(
            p, Min<uptr>(marker.size() * sizeof(RawShadow), shadow_end - p),
            PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
      }
    }
  }
  internal_close(fd);
}

void InitializeShadowMemoryPlatform() {
  char buffer[256];  // Keep in a different frame.
  MapRodata(buffer, sizeof(buffer));
}

#endif  // #if !SANITIZER_GO

# if !SANITIZER_GO
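// Checks whether the stack/address-space limits and the ASLR setting are
// compatible with the TSan shadow mapping, and re-execs the process with
// adjusted settings when they are not.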
static void ReExecIfNeeded() {
  // Go maps shadow memory lazily and works fine with limited address space.
  // Unlimited stack is not a problem either, because the executable
  // is not compiled with -pie.
  bool reexec = false;
  // TSan doesn't play well with unlimited stack size (as stack
  // overlaps with shadow memory). If we detect unlimited stack size,
  // we re-exec the program with limited stack size as a best effort.
  if (StackSizeIsUnlimited()) {
    const uptr kMaxStackSize = 32 * 1024 * 1024;
    VReport(1,
            "Program is run with unlimited stack size, which wouldn't "
            "work with ThreadSanitizer.\n"
            "Re-execing with stack size limited to %zd bytes.\n",
            kMaxStackSize);
    SetStackSizeLimitInBytes(kMaxStackSize);
    reexec = true;
  }

  if (!AddressSpaceIsUnlimited()) {
    Report(
        "WARNING: Program is run with limited virtual address space,"
        " which wouldn't work with ThreadSanitizer.\n");
    Report("Re-execing with unlimited virtual address space.\n");
    SetAddressSpaceUnlimited();
    reexec = true;
  }

#  if SANITIZER_LINUX
  // ASLR personality check.
  int old_personality = personality(0xffffffff);
  bool aslr_on =
      (old_personality != -1) && ((old_personality & ADDR_NO_RANDOMIZE) == 0);

#   if SANITIZER_ANDROID && (defined(__aarch64__) || defined(__x86_64__))
  // After the patch "arm64: mm: support ARCH_MMAP_RND_BITS." was merged into
  // the Linux kernel, the random gap between the stack and the mapped area
  // grew from 128M to 36G on 39-bit aarch64. As it is almost impossible to
  // cover such a large range, we disable randomized virtual address space on
  // aarch64.
  if (aslr_on) {
    VReport(1,
            "WARNING: Program is run with randomized virtual address "
            "space, which wouldn't work with ThreadSanitizer on Android.\n"
            "Re-execing with fixed virtual address space.\n");
    CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
    reexec = true;
  }
#   endif

  if (reexec) {
    // Don't check the address space since we're going to re-exec anyway.
  } else if (!CheckAndProtect(false, false, false)) {
    if (aslr_on) {
      // Disable ASLR if the memory layout was incompatible.
      // Alternatively, we could just keep re-execing until we get lucky
      // with a compatible randomized layout, but the risk is that if the
      // problem is not ASLR-related, we would be stuck in an infinite loop
      // of re-execing (unless we changed ReExec to accept a limit on the
      // number of retries).
      VReport(1,
              "WARNING: ThreadSanitizer: memory layout is incompatible, "
              "possibly due to high-entropy ASLR.\n"
              "Re-execing with fixed virtual address space.\n"
              "N.B. reducing ASLR entropy is preferable.\n");
      CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
      reexec = true;
    } else {
      VReport(1,
              "FATAL: ThreadSanitizer: memory layout is incompatible, "
              "even though ASLR is disabled.\n"
              "Please file a bug.\n");
      Die();
    }
  }
#  endif  // SANITIZER_LINUX

  if (reexec)
    ReExec();
}
# endif

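// The VMA size is derived from the address of a local variable: the 0-based
// index of its most significant set bit plus one gives the number of virtual
// address bits in use, assuming the stack is mapped near the top of the
// userspace address range.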
void InitializePlatformEarly() {
  vmaSize = (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
#if defined(__aarch64__)
# if !SANITIZER_GO
  if (vmaSize != 39 && vmaSize != 42 && vmaSize != 48) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 39, 42 and 48\n", vmaSize);
    Die();
  }
# else
  if (vmaSize != 48) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 48\n", vmaSize);
    Die();
  }
# endif
#elif SANITIZER_LOONGARCH64
  // Both the Go and the C/C++ runtimes require a 47-bit VMA.
  if (vmaSize != 47) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 47\n", vmaSize);
    Die();
  }
#elif defined(__powerpc64__)
# if !SANITIZER_GO
  if (vmaSize != 44 && vmaSize != 46 && vmaSize != 47) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 44, 46 and 47\n", vmaSize);
    Die();
  }
# else
  if (vmaSize != 46 && vmaSize != 47) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 46 and 47\n", vmaSize);
    Die();
  }
# endif
#elif defined(__mips64)
# if !SANITIZER_GO
  if (vmaSize != 40) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 40\n", vmaSize);
    Die();
  }
# else
  if (vmaSize != 47) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 47\n", vmaSize);
    Die();
  }
# endif
#elif SANITIZER_RISCV64
  // Only the bottom half of the address space is allocated for userspace,
  // so add one bit to get the full VMA size.
  vmaSize = vmaSize + 1;
# if !SANITIZER_GO
  if (vmaSize != 39 && vmaSize != 48) {
    Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
    Printf("FATAL: Found %zd - Supported 39 and 48\n", vmaSize);
    Die();
  }
# endif
#endif

#if !SANITIZER_GO
  ReExecIfNeeded();
#endif
}

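// Performs the remaining platform-level initialization: core dump policy,
// the longjmp xor key (where needed), a final memory layout check, and the
// TLS size query.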
void InitializePlatform() {
  DisableCoreDumperIfNecessary();

  // Go maps shadow memory lazily and works fine with limited address space.
  // Unlimited stack is not a problem either, because the executable
  // is not compiled with -pie.
#if !SANITIZER_GO
# if INIT_LONGJMP_XOR_KEY
  // Initialize the xor key used in {sig,}{set,long}jmp.
  InitializeLongjmpXorKey();
# endif

  // Earlier initialization steps have already re-exec'ed until we got a
  // compatible memory layout, so we don't expect any more issues here.
  if (!CheckAndProtect(true, true, true)) {
    Printf(
        "FATAL: ThreadSanitizer: unexpectedly found incompatible memory "
        "layout.\n");
    Printf("FATAL: Please file a bug.\n");
    Die();
  }

  InitTlsSize();
#endif  // !SANITIZER_GO
}

#if !SANITIZER_GO
// Extract file descriptors passed to glibc internal __res_iclose function.
// This is required to properly "close" the fds, because we do not see internal
// closes within glibc. The code is a pure hack.
int ExtractResolvFDs(void *state, int *fds, int nfd) {
#if SANITIZER_LINUX && !SANITIZER_ANDROID
  int cnt = 0;
  struct __res_state *statp = (struct __res_state*)state;
  for (int i = 0; i < MAXNS && cnt < nfd; i++) {
    if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1)
      fds[cnt++] = statp->_u._ext.nssocks[i];
  }
  return cnt;
#else
  return 0;
#endif
}

// Extract file descriptors passed via UNIX domain sockets.
// This is required to properly handle "open" of these fds.
// See 'man recvmsg' and 'man 3 cmsg'.
int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
  int res = 0;
  msghdr *msg = (msghdr*)msgp;
  struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
  for (; cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
    if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS)
      continue;
    int n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(fds[0]);
    for (int i = 0; i < n; i++) {
      fds[res++] = ((int*)CMSG_DATA(cmsg))[i];
      if (res == nfd)
        return res;
    }
  }
  return res;
}

// Reverse operation of libc stack pointer mangling.
static uptr UnmangleLongJmpSp(uptr mangled_sp) {
#if defined(__x86_64__)
# if SANITIZER_LINUX
  // Reverse of:
  //   xor %fs:0x30, %rsi
  //   rol $0x11, %rsi
  uptr sp;
  asm("ror $0x11, %0 \n"
      "xor %%fs:0x30, %0 \n"
      : "=r" (sp)
      : "0" (mangled_sp));
  return sp;
# else
  return mangled_sp;
# endif
#elif defined(__aarch64__)
# if SANITIZER_LINUX
  return mangled_sp ^ longjmp_xor_key;
# else
  return mangled_sp;
# endif
#elif defined(__loongarch_lp64)
  return mangled_sp ^ longjmp_xor_key;
#elif defined(__powerpc64__)
  // Reverse of:
  //   ld  r4, -28696(r13)
  //   xor r4, r3, r4
  uptr xor_key;
  asm("ld %0, -28696(%%r13)" : "=r" (xor_key));
  return mangled_sp ^ xor_key;
#elif defined(__mips__)
  return mangled_sp;
#elif SANITIZER_RISCV64
  return mangled_sp;
#elif defined(__s390x__)
  // tcbhead_t.stack_guard
  uptr xor_key = ((uptr *)__builtin_thread_pointer())[5];
  return mangled_sp ^ xor_key;
#else
# error "Unknown platform"
#endif
}

#if SANITIZER_NETBSD
# ifdef __x86_64__
#  define LONG_JMP_SP_ENV_SLOT 6
# else
#  error unsupported
# endif
#elif defined(__powerpc__)
# define LONG_JMP_SP_ENV_SLOT 0
#elif SANITIZER_FREEBSD
# ifdef __aarch64__
#  define LONG_JMP_SP_ENV_SLOT 1
# else
#  define LONG_JMP_SP_ENV_SLOT 2
# endif
#elif SANITIZER_LINUX
# ifdef __aarch64__
#  define LONG_JMP_SP_ENV_SLOT 13
# elif defined(__loongarch__)
#  define LONG_JMP_SP_ENV_SLOT 1
# elif defined(__mips64)
#  define LONG_JMP_SP_ENV_SLOT 1
# elif SANITIZER_RISCV64
#  define LONG_JMP_SP_ENV_SLOT 13
# elif defined(__s390x__)
#  define LONG_JMP_SP_ENV_SLOT 9
# else
#  define LONG_JMP_SP_ENV_SLOT 6
# endif
#endif

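// Returns the stack pointer that setjmp saved in the given jmp_buf: reads
// the platform-specific slot and undoes any pointer mangling applied by libc.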
uptr ExtractLongJmpSp(uptr *env) {
  uptr mangled_sp = env[LONG_JMP_SP_ENV_SLOT];
  return UnmangleLongJmpSp(mangled_sp);
}

#if INIT_LONGJMP_XOR_KEY
// glibc mangles the saved stack and instruction pointers in jmp_buf (used by
// the {sig,}{set,long}jmp functions) by XORing them with a random key. On
// AArch64 the key is a global variable rather than a TCB one (as on
// x86_64/powerpc). We obtain the key by issuing a setjmp and XORing the
// saved, mangled SP with the real SP.
static void InitializeLongjmpXorKey() {
  // 1. Call REAL(setjmp), which stores the mangled SP in env.
  jmp_buf env;
  REAL(_setjmp)(env);

  // 2. Retrieve vanilla/mangled SP.
  uptr sp;
#ifdef __loongarch__
  asm("move %0, $sp" : "=r" (sp));
#else
  asm("mov %0, sp" : "=r" (sp));
#endif
  uptr mangled_sp = ((uptr *)&env)[LONG_JMP_SP_ENV_SLOT];

  // 3. XOR the SPs to obtain the key.
  longjmp_xor_key = mangled_sp ^ sp;
}
#endif

extern "C" void __tsan_tls_initialization() {}

void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
  // Check that the thr object is in tls.
  const uptr thr_beg = (uptr)thr;
  const uptr thr_end = (uptr)thr + sizeof(*thr);
  CHECK_GE(thr_beg, tls_addr);
  CHECK_LE(thr_beg, tls_addr + tls_size);
  CHECK_GE(thr_end, tls_addr);
  CHECK_LE(thr_end, tls_addr + tls_size);
  // Since the thr object is huge, skip it.
  const uptr pc = StackTrace::GetNextInstructionPc(
      reinterpret_cast<uptr>(__tsan_tls_initialization));
  MemoryRangeImitateWrite(thr, pc, tls_addr, thr_beg - tls_addr);
  MemoryRangeImitateWrite(thr, pc, thr_end, tls_addr + tls_size - thr_end);
}

// Note: this function runs with async signals enabled,
// so it must not touch any tsan state.
int call_pthread_cancel_with_cleanup(int (*fn)(void *arg),
                                     void (*cleanup)(void *arg), void *arg) {
  // pthread_cleanup_push/pop are implemented as a macro mess;
  // we can neither intercept them nor call them without including pthread.h.
  int res;
  pthread_cleanup_push(cleanup, arg);
  res = fn(arg);
  pthread_cleanup_pop(0);
  return res;
}
#endif  // !SANITIZER_GO

#if !SANITIZER_GO
void ReplaceSystemMalloc() { }
#endif

#if !SANITIZER_GO
#if SANITIZER_ANDROID
// On Android, one thread can call intercepted functions after
// DestroyThreadState(), so add a fake thread state for "dead" threads.
static ThreadState *dead_thread_state = nullptr;

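// Returns the ThreadState of the current thread, lazily allocating it on
// first use. All signals are blocked while the state is created so that a
// signal handler cannot observe a half-initialized ThreadState.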
ThreadState *cur_thread() {
  ThreadState* thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
  if (thr == nullptr) {
    __sanitizer_sigset_t fullset;
    internal_sigfillset(&fullset);
    __sanitizer_sigset_t oldset;
    CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &fullset, &oldset));
    thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
    if (thr == nullptr) {
      thr = reinterpret_cast<ThreadState*>(MmapOrDie(sizeof(ThreadState),
                                                     "ThreadState"));
      *get_android_tls_ptr() = reinterpret_cast<uptr>(thr);
      if (dead_thread_state == nullptr) {
        dead_thread_state = reinterpret_cast<ThreadState*>(
            MmapOrDie(sizeof(ThreadState), "ThreadState"));
        dead_thread_state->fast_state.SetIgnoreBit();
        dead_thread_state->ignore_interceptors = 1;
        dead_thread_state->is_dead = true;
        *const_cast<u32*>(&dead_thread_state->tid) = -1;
        CHECK_EQ(0, internal_mprotect(dead_thread_state, sizeof(ThreadState),
                                      PROT_READ));
      }
    }
    CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
  }
  return thr;
}

void set_cur_thread(ThreadState *thr) {
  *get_android_tls_ptr() = reinterpret_cast<uptr>(thr);
}

void cur_thread_finalize() {
  __sanitizer_sigset_t fullset;
  internal_sigfillset(&fullset);
  __sanitizer_sigset_t oldset;
  CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &fullset, &oldset));
  ThreadState* thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
  if (thr != dead_thread_state) {
    *get_android_tls_ptr() = reinterpret_cast<uptr>(dead_thread_state);
    UnmapOrDie(thr, sizeof(ThreadState));
  }
  CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
}
#endif  // SANITIZER_ANDROID
#endif  // !SANITIZER_GO

}  // namespace __tsan

#endif  // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD