//===-- sanitizer_linux_libcdep.cpp ---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements linux-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"

#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
    SANITIZER_SOLARIS

#  include "sanitizer_allocator_internal.h"
#  include "sanitizer_atomic.h"
#  include "sanitizer_common.h"
#  include "sanitizer_file.h"
#  include "sanitizer_flags.h"
#  include "sanitizer_getauxval.h"
#  include "sanitizer_glibc_version.h"
#  include "sanitizer_linux.h"
#  include "sanitizer_placement_new.h"
#  include "sanitizer_procmaps.h"
#  include "sanitizer_solaris.h"

#  if SANITIZER_NETBSD
#    define _RTLD_SOURCE  // for __lwp_gettcb_fast() / __lwp_getprivate_fast()
#  endif

#  include <dlfcn.h>  // for dlsym()
#  include <link.h>
#  include <pthread.h>
#  include <signal.h>
#  include <sys/mman.h>
#  include <sys/resource.h>
#  include <syslog.h>

#  if SANITIZER_GLIBC
#    include <gnu/libc-version.h>
#  endif

#  if !defined(ElfW)
#    define ElfW(type) Elf_##type
#  endif

#  if SANITIZER_FREEBSD
#    include <pthread_np.h>
#    include <sys/auxv.h>
#    include <sys/sysctl.h>
#    define pthread_getattr_np pthread_attr_get_np
// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
// that, it was never implemented. So just define it to zero.
#    undef MAP_NORESERVE
#    define MAP_NORESERVE 0
extern const Elf_Auxinfo *__elf_aux_vector __attribute__((weak));
extern "C" int __sys_sigaction(int signum, const struct sigaction *act,
                               struct sigaction *oldact);
#  endif

#  if SANITIZER_NETBSD
#    include <lwp.h>
#    include <sys/sysctl.h>
#    include <sys/tls.h>
#  endif

#  if SANITIZER_SOLARIS
#    include <stddef.h>
#    include <stdlib.h>
#    include <thread.h>
#  endif

#  if !SANITIZER_ANDROID
#    include <elf.h>
#    include <unistd.h>
#  endif

namespace __sanitizer {

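// real_sigaction is provided (weakly) by the tool's signal interceptors when
// they are linked in; when present, it forwards to the real libc sigaction,
// so internal_sigaction below can bypass the sanitizer's own interceptor.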
SANITIZER_WEAK_ATTRIBUTE int real_sigaction(int signum, const void *act,
                                            void *oldact);

int internal_sigaction(int signum, const void *act, void *oldact) {
#  if SANITIZER_FREEBSD
  // On FreeBSD, call the sigaction syscall directly (part of libsys in FreeBSD
  // 15) since the libc version goes via a global interposing table. Due to
  // library initialization order the table can be relocated after the call to
  // InitializeDeadlySignals() which then crashes when dereferencing the
  // uninitialized pointer in libc.
  return __sys_sigaction(signum, (const struct sigaction *)act,
                         (struct sigaction *)oldact);
#  else
#    if !SANITIZER_GO
  if (&real_sigaction)
    return real_sigaction(signum, act, oldact);
#    endif
  return sigaction(signum, (const struct sigaction *)act,
                   (struct sigaction *)oldact);
#  endif
}

void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  CHECK(stack_top);
  CHECK(stack_bottom);
  if (at_initialization) {
    // This is the main thread. Libpthread may not be initialized yet.
    struct rlimit rl;
    CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);

    // Find the mapping that contains a stack variable.
    MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
    if (proc_maps.Error()) {
      *stack_top = *stack_bottom = 0;
      return;
    }
    MemoryMappedSegment segment;
    uptr prev_end = 0;
    while (proc_maps.Next(&segment)) {
      if ((uptr)&rl < segment.end)
        break;
      prev_end = segment.end;
    }
    CHECK((uptr)&rl >= segment.start && (uptr)&rl < segment.end);

    // Get stacksize from rlimit, but clip it so that it does not overlap
    // with other mappings.
    uptr stacksize = rl.rlim_cur;
    if (stacksize > segment.end - prev_end)
      stacksize = segment.end - prev_end;
    // When running with unlimited stack size, we still want to set some limit.
    // The unlimited stack size is caused by 'ulimit -s unlimited'.
    // Also, for some reason, GNU make spawns subprocesses with unlimited stack.
    if (stacksize > kMaxThreadStackSize)
      stacksize = kMaxThreadStackSize;
    *stack_top = segment.end;
    *stack_bottom = segment.end - stacksize;

    uptr maxAddr = GetMaxUserVirtualAddress();
    // Edge case: the stack mapping on some systems may be off-by-one e.g.,
    //     fffffffdf000-1000000000000 rw-p 00000000 00:00 0 [stack]
    // instead of:
    //     fffffffdf000-ffffffffffff
    // The out-of-range stack_top can result in an invalid shadow address
    // calculation, since those usually assume the parameters are in range.
    if (*stack_top == maxAddr + 1)
      *stack_top = maxAddr;
    else
      CHECK_LE(*stack_top, maxAddr);

    return;
  }
  uptr stacksize = 0;
  void *stackaddr = nullptr;
#  if SANITIZER_SOLARIS
  stack_t ss;
  CHECK_EQ(thr_stksegment(&ss), 0);
  stacksize = ss.ss_size;
  stackaddr = (char *)ss.ss_sp - stacksize;
#  else  // !SANITIZER_SOLARIS
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  internal_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
  pthread_attr_destroy(&attr);
#  endif  // SANITIZER_SOLARIS

  *stack_top = (uptr)stackaddr + stacksize;
  *stack_bottom = (uptr)stackaddr;
}

#  if !SANITIZER_GO
bool SetEnv(const char *name, const char *value) {
  void *f = dlsym(RTLD_NEXT, "setenv");
  if (!f)
    return false;
  typedef int (*setenv_ft)(const char *name, const char *value, int overwrite);
  setenv_ft setenv_f;
  CHECK_EQ(sizeof(setenv_f), sizeof(f));
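  // Converting the object pointer returned by dlsym to a function pointer is
  // only conditionally supported in C++, so copy the bytes instead of casting.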
  internal_memcpy(&setenv_f, &f, sizeof(f));
  return setenv_f(name, value, 1) == 0;
}
#  endif

// True if we can use dlpi_tls_data. glibc before 2.25 may leave NULL (BZ
// #19826) so dlpi_tls_data cannot be used.
//
// musl before 1.2.3 and FreeBSD as of 12.2 incorrectly set dlpi_tls_data to
// the TLS initialization image
// https://bugs.freebsd.org/bugzilla/show_bug.cgi?id=254774
__attribute__((unused)) static int g_use_dlpi_tls_data;

#  if SANITIZER_GLIBC && !SANITIZER_GO
static void GetGLibcVersion(int *major, int *minor, int *patch) {
  const char *p = gnu_get_libc_version();
  *major = internal_simple_strtoll(p, &p, 10);
  // Caller does not expect anything else.
  CHECK_EQ(*major, 2);
  *minor = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0;
  *patch = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0;
}

static uptr ThreadDescriptorSizeFallback() {
#    if defined(__x86_64__) || defined(__i386__) || defined(__arm__) || \
        SANITIZER_RISCV64
  int major;
  int minor;
  int patch;
  GetGLibcVersion(&major, &minor, &patch);
#    endif

#    if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
  /* sizeof(struct pthread) values from various glibc versions.  */
  if (SANITIZER_X32)
    return 1728;  // Assume only one particular version for x32.
  // For ARM sizeof(struct pthread) changed in Glibc 2.23.
  if (SANITIZER_ARM)
    return minor <= 22 ? 1120 : 1216;
  if (minor <= 3)
    return FIRST_32_SECOND_64(1104, 1696);
  if (minor == 4)
    return FIRST_32_SECOND_64(1120, 1728);
  if (minor == 5)
    return FIRST_32_SECOND_64(1136, 1728);
  if (minor <= 9)
    return FIRST_32_SECOND_64(1136, 1712);
  if (minor == 10)
    return FIRST_32_SECOND_64(1168, 1776);
  if (minor == 11 || (minor == 12 && patch == 1))
    return FIRST_32_SECOND_64(1168, 2288);
  if (minor <= 14)
    return FIRST_32_SECOND_64(1168, 2304);
  if (minor < 32)  // Unknown version
    return FIRST_32_SECOND_64(1216, 2304);
  // minor == 32
  return FIRST_32_SECOND_64(1344, 2496);
#    endif

#    if SANITIZER_RISCV64
  // TODO: consider adding an optional runtime check for an unknown (untested)
  // glibc version
  if (minor <= 28)  // WARNING: the highest tested version is 2.29
    return 1772;    // no guarantees for this one
  if (minor <= 31)
    return 1772;  // tested against glibc 2.29, 2.31
  return 1936;    // tested against glibc 2.32
#    endif

#    if defined(__s390__) || defined(__sparc__)
  // The size of a prefix of TCB including pthread::{specific_1stblock,specific}
  // suffices. Just return offsetof(struct pthread, specific_used), which hasn't
  // changed since 2007-05. Technically this applies to i386/x86_64 as well but
  // we call _dl_get_tls_static_info and need the precise size of struct
  // pthread.
  return FIRST_32_SECOND_64(524, 1552);
#    endif

#    if defined(__mips__)
  // TODO(sagarthakur): add more values as per different glibc versions.
  return FIRST_32_SECOND_64(1152, 1776);
#    endif

#    if SANITIZER_LOONGARCH64
  return 1856;  // from glibc 2.36
#    endif

#    if defined(__aarch64__)
  // The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22.
  return 1776;
#    endif

#    if defined(__powerpc64__)
  return 1776;  // from glibc.ppc64le 2.20-8.fc21
#    endif
}
#  endif  // SANITIZER_GLIBC && !SANITIZER_GO

#  if SANITIZER_FREEBSD && !SANITIZER_GO
// FIXME: Implementation is very GLIBC specific, but it's used by FreeBSD.
static uptr ThreadDescriptorSizeFallback() {
#    if defined(__s390__) || defined(__sparc__)
  // The size of a prefix of TCB including pthread::{specific_1stblock,specific}
  // suffices. Just return offsetof(struct pthread, specific_used), which hasn't
  // changed since 2007-05. Technically this applies to i386/x86_64 as well but
  // we call _dl_get_tls_static_info and need the precise size of struct
  // pthread.
  return FIRST_32_SECOND_64(524, 1552);
#    endif

#    if defined(__mips__)
  // TODO(sagarthakur): add more values as per different glibc versions.
  return FIRST_32_SECOND_64(1152, 1776);
#    endif

#    if SANITIZER_LOONGARCH64
  return 1856;  // from glibc 2.36
#    endif

#    if defined(__aarch64__)
  // The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22.
  return 1776;
#    endif

#    if defined(__powerpc64__)
  return 1776;  // from glibc.ppc64le 2.20-8.fc21
#    endif

  return 0;
}
#  endif  // SANITIZER_FREEBSD && !SANITIZER_GO

#  if (SANITIZER_FREEBSD || SANITIZER_GLIBC) && !SANITIZER_GO
// On glibc x86_64, ThreadDescriptorSize() needs to be precise due to the usage
// of g_tls_size. On other targets, ThreadDescriptorSize() is only used by lsan
// to get the pointer to thread-specific data keys in the thread control block.
// sizeof(struct pthread) from glibc.
static uptr thread_descriptor_size;

uptr ThreadDescriptorSize() { return thread_descriptor_size; }

#    if SANITIZER_GLIBC
__attribute__((unused)) static size_t g_tls_size;
#    endif

void InitTlsSize() {
#    if SANITIZER_GLIBC
  int major, minor, patch;
  GetGLibcVersion(&major, &minor, &patch);
  g_use_dlpi_tls_data = major == 2 && minor >= 25;

  if (major == 2 && minor >= 34) {
    // _thread_db_sizeof_pthread is a GLIBC_PRIVATE symbol that is exported in
    // glibc 2.34 and later.
    if (unsigned *psizeof = static_cast<unsigned *>(
            dlsym(RTLD_DEFAULT, "_thread_db_sizeof_pthread"))) {
      thread_descriptor_size = *psizeof;
    }
  }

#      if defined(__aarch64__) || defined(__x86_64__) || \
          defined(__powerpc64__) || defined(__loongarch__)
  auto *get_tls_static_info = (void (*)(size_t *, size_t *))dlsym(
      RTLD_DEFAULT, "_dl_get_tls_static_info");
  size_t tls_align;
  // Can be null if the program is linked statically.
  if (get_tls_static_info)
    get_tls_static_info(&g_tls_size, &tls_align);
#      endif

#    endif  // SANITIZER_GLIBC

  if (!thread_descriptor_size)
    thread_descriptor_size = ThreadDescriptorSizeFallback();
}

#    if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64 || \
        SANITIZER_LOONGARCH64
// TlsPreTcbSize includes size of struct pthread_descr and size of tcb
// head structure. It lies before the static tls blocks.
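// For illustration: on powerpc64 with the fallback descriptor size of 1776
// and kTcbHead == 88 below, this yields RoundUpTo(1864, 16) == 1872.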
static uptr TlsPreTcbSize() {
#      if defined(__mips__)
  const uptr kTcbHead = 16;  // sizeof (tcbhead_t)
#      elif defined(__powerpc64__)
  const uptr kTcbHead = 88;  // sizeof (tcbhead_t)
#      elif SANITIZER_RISCV64
  const uptr kTcbHead = 16;  // sizeof (tcbhead_t)
#      elif SANITIZER_LOONGARCH64
  const uptr kTcbHead = 16;  // sizeof (tcbhead_t)
#      endif
  const uptr kTlsAlign = 16;
  const uptr kTlsPreTcbSize =
      RoundUpTo(ThreadDescriptorSize() + kTcbHead, kTlsAlign);
  return kTlsPreTcbSize;
}
#    endif
#  else  // (SANITIZER_FREEBSD || SANITIZER_GLIBC) && !SANITIZER_GO
void InitTlsSize() {}
uptr ThreadDescriptorSize() { return 0; }
#  endif  // (SANITIZER_FREEBSD || SANITIZER_GLIBC) && !SANITIZER_GO

#  if (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS) && \
      !SANITIZER_ANDROID && !SANITIZER_GO
namespace {
struct TlsBlock {
  uptr begin, end, align;
  size_t tls_modid;
  bool operator<(const TlsBlock &rhs) const { return begin < rhs.begin; }
};
}  // namespace

#    ifdef __s390__
extern "C" uptr __tls_get_offset(void *arg);

static uptr TlsGetOffset(uptr ti_module, uptr ti_offset) {
  // The __tls_get_offset ABI requires %r12 to point to GOT and %r2 to be an
  // offset of a struct tls_index inside GOT. We don't possess either of the
  // two, so violate the letter of the "ELF Handling For Thread-Local
  // Storage" document and assume that the implementation just dereferences
  // %r2 + %r12.
  uptr tls_index[2] = {ti_module, ti_offset};
  register uptr r2 asm("2") = 0;
  register void *r12 asm("12") = tls_index;
  asm("basr %%r14, %[__tls_get_offset]"
      : "+r"(r2)
      : [__tls_get_offset] "r"(__tls_get_offset), "r"(r12)
      : "memory", "cc", "0", "1", "3", "4", "5", "14");
  return r2;
}
#    else
extern "C" void *__tls_get_addr(size_t *);
#    endif

static size_t main_tls_modid;

static int CollectStaticTlsBlocks(struct dl_phdr_info *info, size_t size,
                                  void *data) {
  size_t tls_modid;
#    if SANITIZER_SOLARIS
  // dlpi_tls_modid is only available since Solaris 11.4 SRU 10. Use
  // dlinfo(RTLD_DI_LINKMAP) instead which works on all of Solaris 11.3,
  // 11.4, and Illumos. The tlsmodid of the executable was changed to 1 in
  // 11.4 to match other implementations.
  if (size >= offsetof(dl_phdr_info_test, dlpi_tls_modid))
    main_tls_modid = 1;
  else
    main_tls_modid = 0;
  g_use_dlpi_tls_data = 0;
  Rt_map *map;
  dlinfo(RTLD_SELF, RTLD_DI_LINKMAP, &map);
  tls_modid = map->rt_tlsmodid;
#    else
  main_tls_modid = 1;
  tls_modid = info->dlpi_tls_modid;
#    endif

  if (tls_modid < main_tls_modid)
    return 0;
  uptr begin;
#    if !SANITIZER_SOLARIS
  begin = (uptr)info->dlpi_tls_data;
#    endif
  if (!g_use_dlpi_tls_data) {
    // Call __tls_get_addr as a fallback. This forces TLS allocation on glibc
    // and FreeBSD.
#    ifdef __s390__
    begin = (uptr)__builtin_thread_pointer() + TlsGetOffset(tls_modid, 0);
#    else
    size_t mod_and_off[2] = {tls_modid, 0};
    begin = (uptr)__tls_get_addr(mod_and_off);
#    endif
  }
  for (unsigned i = 0; i != info->dlpi_phnum; ++i)
    if (info->dlpi_phdr[i].p_type == PT_TLS) {
      static_cast<InternalMmapVector<TlsBlock> *>(data)->push_back(
          TlsBlock{begin, begin + info->dlpi_phdr[i].p_memsz,
                   info->dlpi_phdr[i].p_align, tls_modid});
      break;
    }
  return 0;
}

__attribute__((unused)) static void GetStaticTlsBoundary(uptr *addr, uptr *size,
                                                         uptr *align) {
  InternalMmapVector<TlsBlock> ranges;
  dl_iterate_phdr(CollectStaticTlsBlocks, &ranges);
  uptr len = ranges.size();
  Sort(ranges.begin(), len);
  // Find the range with tls_modid == main_tls_modid. For glibc, because
  // libc.so uses PT_TLS, this module is guaranteed to exist and is one of
  // the initially loaded modules.
  uptr one = 0;
  while (one != len && ranges[one].tls_modid != main_tls_modid) ++one;
  if (one == len) {
    // This may happen with musl if no module uses PT_TLS.
    *addr = 0;
    *size = 0;
    *align = 1;
    return;
  }
  // Find the maximum consecutive ranges. We consider two modules consecutive
  // if the gap is smaller than the alignment of the latter range. The dynamic
  // loader places static TLS blocks this way so as not to waste space.
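  // For example, blocks [0x1000,0x1040) with align 16 and [0x1050,0x1080)
  // with align 64 are merged: the 0x10-byte gap is smaller than the second
  // block's alignment.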
  uptr l = one;
  *align = ranges[l].align;
  while (l != 0 && ranges[l].begin < ranges[l - 1].end + ranges[l].align)
    *align = Max(*align, ranges[--l].align);
  uptr r = one + 1;
  while (r != len && ranges[r].begin < ranges[r - 1].end + ranges[r].align)
    *align = Max(*align, ranges[r++].align);
  *addr = ranges[l].begin;
  *size = ranges[r - 1].end - ranges[l].begin;
}
#  endif  // (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS) &&
          // !SANITIZER_ANDROID && !SANITIZER_GO

#  if SANITIZER_NETBSD
static struct tls_tcb *ThreadSelfTlsTcb() {
  struct tls_tcb *tcb = nullptr;
#    ifdef __HAVE___LWP_GETTCB_FAST
  tcb = (struct tls_tcb *)__lwp_gettcb_fast();
#    elif defined(__HAVE___LWP_GETPRIVATE_FAST)
  tcb = (struct tls_tcb *)__lwp_getprivate_fast();
#    endif
  return tcb;
}

uptr ThreadSelf() { return (uptr)ThreadSelfTlsTcb()->tcb_pthread; }

int GetSizeFromHdr(struct dl_phdr_info *info, size_t size, void *data) {
  const Elf_Phdr *hdr = info->dlpi_phdr;
  const Elf_Phdr *last_hdr = hdr + info->dlpi_phnum;

  for (; hdr != last_hdr; ++hdr) {
    if (hdr->p_type == PT_TLS && info->dlpi_tls_modid == 1) {
      *(uptr *)data = hdr->p_memsz;
      break;
    }
  }
  return 0;
}
#  endif  // SANITIZER_NETBSD

#  if SANITIZER_ANDROID
// Bionic provides this API since S.
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_get_static_tls_bounds(void **,
                                                                      void **);
#  endif

#  if !SANITIZER_GO
static void GetTls(uptr *addr, uptr *size) {
#    if SANITIZER_ANDROID
  if (&__libc_get_static_tls_bounds) {
    void *start_addr;
    void *end_addr;
    __libc_get_static_tls_bounds(&start_addr, &end_addr);
    *addr = reinterpret_cast<uptr>(start_addr);
    *size =
        reinterpret_cast<uptr>(end_addr) - reinterpret_cast<uptr>(start_addr);
  } else {
    *addr = 0;
    *size = 0;
  }
#    elif SANITIZER_GLIBC && defined(__x86_64__)
  // For aarch64 and x86-64, use an O(1) approach which requires relatively
  // precise ThreadDescriptorSize. g_tls_size was initialized in InitTlsSize.
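  // On x86-64 glibc the static TLS blocks sit just below the thread pointer
  // (TLS variant II); %fs:16 (%fs:8 on x32) is the TCB's "self" pointer,
  // which equals the thread pointer.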
#      if SANITIZER_X32
  asm("mov %%fs:8,%0" : "=r"(*addr));
#      else
  asm("mov %%fs:16,%0" : "=r"(*addr));
#      endif
  *size = g_tls_size;
  *addr -= *size;
  *addr += ThreadDescriptorSize();
#    elif SANITIZER_GLIBC && defined(__aarch64__)
  *addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
          ThreadDescriptorSize();
  *size = g_tls_size + ThreadDescriptorSize();
#    elif SANITIZER_GLIBC && defined(__loongarch__)
#      ifdef __clang__
  *addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
          ThreadDescriptorSize();
#      else
  asm("or %0,$tp,$zero" : "=r"(*addr));
  *addr -= ThreadDescriptorSize();
#      endif
  *size = g_tls_size + ThreadDescriptorSize();
#    elif SANITIZER_GLIBC && defined(__powerpc64__)
  // Workaround for glibc<2.25(?). 2.27 is known to not need this.
  uptr tp;
  asm("addi %0,13,-0x7000" : "=r"(tp));
  const uptr pre_tcb_size = TlsPreTcbSize();
  *addr = tp - pre_tcb_size;
  *size = g_tls_size + pre_tcb_size;
#    elif SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_SOLARIS
  uptr align;
  GetStaticTlsBoundary(addr, size, &align);
#      if defined(__x86_64__) || defined(__i386__) || defined(__s390__) || \
          defined(__sparc__)
  if (SANITIZER_GLIBC) {
#        if defined(__x86_64__) || defined(__i386__)
    align = Max<uptr>(align, 64);
#        else
    align = Max<uptr>(align, 16);
#        endif
  }
  const uptr tp = RoundUpTo(*addr + *size, align);

  // lsan requires the range to additionally cover the static TLS surplus
  // (elf/dl-tls.c defines 1664). Otherwise there may be false positives for
  // allocations only referenced by tls in dynamically loaded modules.
  if (SANITIZER_GLIBC)
    *size += 1664;
  else if (SANITIZER_FREEBSD)
    *size += 128;  // RTLD_STATIC_TLS_EXTRA

  // Extend the range to include the thread control block. On glibc, lsan needs
  // the range to include pthread::{specific_1stblock,specific} so that
  // allocations only referenced by pthread_setspecific can be scanned. This
  // may underestimate by at most TLS_TCB_ALIGN-1 bytes but it should be fine
  // because the number of bytes after pthread::specific is larger.
  *addr = tp - RoundUpTo(*size, align);
  *size = tp - *addr + ThreadDescriptorSize();
#      else
  if (SANITIZER_GLIBC)
    *size += 1664;
  else if (SANITIZER_FREEBSD)
    *size += 128;  // RTLD_STATIC_TLS_EXTRA
#        if defined(__mips__) || defined(__powerpc64__) || SANITIZER_RISCV64
  const uptr pre_tcb_size = TlsPreTcbSize();
  *addr -= pre_tcb_size;
  *size += pre_tcb_size;
#        else
  // arm and aarch64 reserve two words at TP, so this underestimates the range.
  // However, this is sufficient for the purpose of finding the pointers to
  // thread-specific data keys.
  const uptr tcb_size = ThreadDescriptorSize();
  *addr -= tcb_size;
  *size += tcb_size;
#        endif
#      endif
#    elif SANITIZER_NETBSD
  struct tls_tcb *const tcb = ThreadSelfTlsTcb();
  *addr = 0;
  *size = 0;
  if (tcb != 0) {
    // Find size (p_memsz) of dlpi_tls_modid 1 (TLS block of the main program).
    // ld.elf_so hardcodes the index 1.
    dl_iterate_phdr(GetSizeFromHdr, size);

    if (*size != 0) {
      // The block has been found and tcb_dtv[1] contains the base address
      *addr = (uptr)tcb->tcb_dtv[1];
    }
  }
#    else
#      error "Unknown OS"
#    endif
}
#  endif

#  if !SANITIZER_GO
uptr GetTlsSize() {
#    if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
        SANITIZER_SOLARIS
  uptr addr, size;
  GetTls(&addr, &size);
  return size;
#    else
  return 0;
#    endif
}
#  endif

void GetThreadStackAndTls(bool main, uptr *stk_begin, uptr *stk_end,
                          uptr *tls_begin, uptr *tls_end) {
#  if SANITIZER_GO
  // Stub implementation for Go.
  *stk_begin = 0;
  *stk_end = 0;
  *tls_begin = 0;
  *tls_end = 0;
#  else
  uptr tls_addr = 0;
  uptr tls_size = 0;
  GetTls(&tls_addr, &tls_size);
  *tls_begin = tls_addr;
  *tls_end = tls_addr + tls_size;

  uptr stack_top, stack_bottom;
  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
  *stk_begin = stack_bottom;
  *stk_end = stack_top;

  if (!main) {
    // If stack and tls intersect, make them non-intersecting.
    if (*tls_begin > *stk_begin && *tls_begin < *stk_end) {
      if (*stk_end < *tls_end)
        *tls_end = *stk_end;
      *stk_end = *tls_begin;
    }
  }
#  endif
}

#  if !SANITIZER_FREEBSD
typedef ElfW(Phdr) Elf_Phdr;
#  endif

struct DlIteratePhdrData {
  InternalMmapVectorNoCtor<LoadedModule> *modules;
  bool first;
};

static int AddModuleSegments(const char *module_name, dl_phdr_info *info,
                             InternalMmapVectorNoCtor<LoadedModule> *modules) {
  if (module_name[0] == '\0')
    return 0;
  LoadedModule cur_module;
  cur_module.set(module_name, info->dlpi_addr);
  for (int i = 0; i < (int)info->dlpi_phnum; i++) {
    const Elf_Phdr *phdr = &info->dlpi_phdr[i];
    if (phdr->p_type == PT_LOAD) {
      uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
      uptr cur_end = cur_beg + phdr->p_memsz;
      bool executable = phdr->p_flags & PF_X;
      bool writable = phdr->p_flags & PF_W;
      cur_module.addAddressRange(cur_beg, cur_end, executable, writable);
    } else if (phdr->p_type == PT_NOTE) {
#  ifdef NT_GNU_BUILD_ID
      uptr off = 0;
      while (off + sizeof(ElfW(Nhdr)) < phdr->p_memsz) {
        auto *nhdr = reinterpret_cast<const ElfW(Nhdr) *>(info->dlpi_addr +
                                                          phdr->p_vaddr + off);
        constexpr auto kGnuNamesz = 4;  // "GNU" with NUL-byte.
        static_assert(kGnuNamesz % 4 == 0, "kGnuNamesz is aligned to 4.");
        if (nhdr->n_type == NT_GNU_BUILD_ID && nhdr->n_namesz == kGnuNamesz) {
          if (off + sizeof(ElfW(Nhdr)) + nhdr->n_namesz + nhdr->n_descsz >
              phdr->p_memsz) {
            // Something is very wrong, bail out instead of reading potentially
            // arbitrary memory.
            break;
          }
          const char *name =
              reinterpret_cast<const char *>(nhdr) + sizeof(*nhdr);
          if (internal_memcmp(name, "GNU", 3) == 0) {
            const char *value = reinterpret_cast<const char *>(nhdr) +
                                sizeof(*nhdr) + kGnuNamesz;
            cur_module.setUuid(value, nhdr->n_descsz);
            break;
          }
        }
        off += sizeof(*nhdr) + RoundUpTo(nhdr->n_namesz, 4) +
               RoundUpTo(nhdr->n_descsz, 4);
      }
#  endif
    }
  }
  modules->push_back(cur_module);
  return 0;
}

static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
  DlIteratePhdrData *data = (DlIteratePhdrData *)arg;
  if (data->first) {
    InternalMmapVector<char> module_name(kMaxPathLength);
    data->first = false;
    // First module is the binary itself.
    ReadBinaryNameCached(module_name.data(), module_name.size());
    return AddModuleSegments(module_name.data(), info, data->modules);
  }

  if (info->dlpi_name)
    return AddModuleSegments(info->dlpi_name, info, data->modules);

  return 0;
}

static bool requiresProcmaps() {
#  if SANITIZER_ANDROID && __ANDROID_API__ <= 22
  // Fall back to /proc/maps if dl_iterate_phdr is unavailable or broken.
  // The runtime check allows the same library to work with
  // both K and L (and future) Android releases.
  return AndroidGetApiLevel() <= ANDROID_LOLLIPOP_MR1;
#  else
  return false;
#  endif
}

static void procmapsInit(InternalMmapVectorNoCtor<LoadedModule> *modules) {
  MemoryMappingLayout memory_mapping(/*cache_enabled*/ true);
  memory_mapping.DumpListOfModules(modules);
}

void ListOfModules::init() {
  clearOrInit();
  if (requiresProcmaps()) {
    procmapsInit(&modules_);
  } else {
    DlIteratePhdrData data = {&modules_, true};
    dl_iterate_phdr(dl_iterate_phdr_cb, &data);
  }
}

// When a custom loader is used, dl_iterate_phdr may not contain the full
// list of modules. Allow callers to fall back to using procmaps.
void ListOfModules::fallbackInit() {
  if (!requiresProcmaps()) {
    clearOrInit();
    procmapsInit(&modules_);
  } else {
    clear();
  }
}

// getrusage does not give us the current RSS, only the max RSS.
// Still, this is better than nothing if /proc/self/statm is not available
// for some reason, e.g. due to a sandbox.
static uptr GetRSSFromGetrusage() {
  struct rusage usage;
  if (getrusage(RUSAGE_SELF, &usage))  // Failed, probably due to a sandbox.
    return 0;
  return usage.ru_maxrss << 10;  // ru_maxrss is in Kb.
}

uptr GetRSS() {
  if (!common_flags()->can_use_proc_maps_statm)
    return GetRSSFromGetrusage();
  fd_t fd = OpenFile("/proc/self/statm", RdOnly);
  if (fd == kInvalidFd)
    return GetRSSFromGetrusage();
  char buf[64];
  uptr len = internal_read(fd, buf, sizeof(buf) - 1);
  internal_close(fd);
  if ((sptr)len <= 0)
    return 0;
  buf[len] = 0;
  // The format of the file is:
  //   1084 89 69 11 0 79 0
  // We need the second number which is RSS in pages.
  char *pos = buf;
  // Skip the first number.
  while (*pos >= '0' && *pos <= '9') pos++;
  // Skip whitespaces.
  while (!(*pos >= '0' && *pos <= '9') && *pos != 0) pos++;
  // Read the number.
  uptr rss = 0;
  while (*pos >= '0' && *pos <= '9') rss = rss * 10 + *pos++ - '0';
  return rss * GetPageSizeCached();
}

// sysconf(_SC_NPROCESSORS_{CONF,ONLN}) cannot be used on most platforms as
// they allocate memory.
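// Note that the Linux fallback below counts the CPUs in the current thread's
// affinity mask, which can be smaller than the number of CPUs online.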
u32 GetNumberOfCPUs() {
#  if SANITIZER_FREEBSD || SANITIZER_NETBSD
  u32 ncpu;
  int req[2];
  uptr len = sizeof(ncpu);
  req[0] = CTL_HW;
  req[1] = HW_NCPU;
  CHECK_EQ(internal_sysctl(req, 2, &ncpu, &len, NULL, 0), 0);
  return ncpu;
#  elif SANITIZER_SOLARIS
  return sysconf(_SC_NPROCESSORS_ONLN);
#  else
  cpu_set_t CPUs;
  CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
  return CPU_COUNT(&CPUs);
#  endif
}

#  if SANITIZER_LINUX

#    if SANITIZER_ANDROID
static atomic_uint8_t android_log_initialized;

void AndroidLogInit() {
  openlog(GetProcessName(), 0, LOG_USER);
  atomic_store(&android_log_initialized, 1, memory_order_release);
}

static bool ShouldLogAfterPrintf() {
  return atomic_load(&android_log_initialized, memory_order_acquire);
}

extern "C" SANITIZER_WEAK_ATTRIBUTE int async_safe_write_log(int pri,
                                                             const char *tag,
                                                             const char *msg);
extern "C" SANITIZER_WEAK_ATTRIBUTE int __android_log_write(int prio,
                                                            const char *tag,
                                                            const char *msg);

// ANDROID_LOG_INFO is 4, but can't be resolved at runtime.
#      define SANITIZER_ANDROID_LOG_INFO 4

// async_safe_write_log is a new public version of __libc_write_log that is
// used behind syslog. It is preferable to syslog as it will not do any dynamic
// memory allocation or formatting.
// If the function is not available, syslog is preferred for L+ (it was broken
// pre-L) as __android_log_write triggers racy behavior with the strncpy
// interceptor. Fall back to __android_log_write pre-L.
void WriteOneLineToSyslog(const char *s) {
  if (&async_safe_write_log) {
    async_safe_write_log(SANITIZER_ANDROID_LOG_INFO, GetProcessName(), s);
  } else {
    syslog(LOG_INFO, "%s", s);
  }
}

extern "C" SANITIZER_WEAK_ATTRIBUTE void android_set_abort_message(
    const char *);

void SetAbortMessage(const char *str) {
  if (&android_set_abort_message)
    android_set_abort_message(str);
}
#    else
void AndroidLogInit() {}

static bool ShouldLogAfterPrintf() { return true; }

void WriteOneLineToSyslog(const char *s) { syslog(LOG_INFO, "%s", s); }

void SetAbortMessage(const char *str) {}
#    endif  // SANITIZER_ANDROID

void LogMessageOnPrintf(const char *str) {
  if (common_flags()->log_to_syslog && ShouldLogAfterPrintf())
    WriteToSyslog(str);
}

#  endif  // SANITIZER_LINUX

#  if SANITIZER_GLIBC && !SANITIZER_GO
// glibc crashes when using clock_gettime from a preinit_array function as the
// vDSO function pointers haven't been initialized yet. __progname is
// initialized after the vDSO function pointers, so if it exists, is not null,
// and is not empty, we can use clock_gettime.
extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname;
inline bool CanUseVDSO() { return &__progname && __progname && *__progname; }

// MonotonicNanoTime is a timing function that can leverage the vDSO by calling
// clock_gettime. real_clock_gettime only exists if clock_gettime is
// intercepted, so define it weakly and use it if available.
extern "C" SANITIZER_WEAK_ATTRIBUTE int real_clock_gettime(u32 clk_id,
                                                           void *tp);
u64 MonotonicNanoTime() {
  timespec ts;
  if (CanUseVDSO()) {
    if (&real_clock_gettime)
      real_clock_gettime(CLOCK_MONOTONIC, &ts);
    else
      clock_gettime(CLOCK_MONOTONIC, &ts);
  } else {
    internal_clock_gettime(CLOCK_MONOTONIC, &ts);
  }
  return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
}
#  else
// Non-glibc & Go always use the regular function.
u64 MonotonicNanoTime() {
  timespec ts;
  clock_gettime(CLOCK_MONOTONIC, &ts);
  return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
}
#  endif  // SANITIZER_GLIBC && !SANITIZER_GO

void ReExec() {
  const char *pathname = "/proc/self/exe";

#  if SANITIZER_FREEBSD
  for (const auto *aux = __elf_aux_vector; aux->a_type != AT_NULL; aux++) {
    if (aux->a_type == AT_EXECPATH) {
      pathname = static_cast<const char *>(aux->a_un.a_ptr);
      break;
    }
  }
#  elif SANITIZER_NETBSD
  static const int name[] = {
      CTL_KERN,
      KERN_PROC_ARGS,
      -1,
      KERN_PROC_PATHNAME,
  };
  char path[400];
  uptr len;

  len = sizeof(path);
  if (internal_sysctl(name, ARRAY_SIZE(name), path, &len, NULL, 0) != -1)
    pathname = path;
#  elif SANITIZER_SOLARIS
  pathname = getexecname();
  CHECK_NE(pathname, NULL);
#  elif SANITIZER_USE_GETAUXVAL
  // Calling execve with /proc/self/exe sets that as $EXEC_ORIGIN. Binaries
  // that rely on that will fail to load shared libraries. Query AT_EXECFN
  // instead.
  pathname = reinterpret_cast<const char *>(getauxval(AT_EXECFN));
#  endif

  uptr rv = internal_execve(pathname, GetArgv(), GetEnviron());
  int rverrno;
  CHECK_EQ(internal_iserror(rv, &rverrno), true);
  Printf("execve failed, errno %d\n", rverrno);
  Die();
}

void UnmapFromTo(uptr from, uptr to) {
  if (to == from)
    return;
  CHECK(to >= from);
  uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
  if (UNLIKELY(internal_iserror(res))) {
    Report("ERROR: %s failed to unmap 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, to - from, to - from, (void *)from);
    CHECK("unable to unmap" && 0);
  }
}

uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment, UNUSED uptr &high_mem_end,
                      uptr granularity) {
  const uptr alignment =
      Max<uptr>(granularity << shadow_scale, 1ULL << min_shadow_base_alignment);
  const uptr left_padding =
      Max<uptr>(granularity, 1ULL << min_shadow_base_alignment);

  const uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity);
  const uptr map_size = shadow_size + left_padding + alignment;

  const uptr map_start = (uptr)MmapNoAccess(map_size);
  CHECK_NE(map_start, ~(uptr)0);

  const uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);

  UnmapFromTo(map_start, shadow_start - left_padding);
  UnmapFromTo(shadow_start + shadow_size, map_start + map_size);

  return shadow_start;
}

static uptr MmapSharedNoReserve(uptr addr, uptr size) {
  return internal_mmap(
      reinterpret_cast<void *>(addr), size, PROT_READ | PROT_WRITE,
      MAP_FIXED | MAP_SHARED | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
}

static uptr MremapCreateAlias(uptr base_addr, uptr alias_addr,
                              uptr alias_size) {
#  if SANITIZER_LINUX
  return internal_mremap(reinterpret_cast<void *>(base_addr), 0, alias_size,
                         MREMAP_MAYMOVE | MREMAP_FIXED,
                         reinterpret_cast<void *>(alias_addr));
#  else
  CHECK(false && "mremap is not supported outside of Linux");
  return 0;
#  endif
}

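// Map a shared region at start_addr and then remap each subsequent
// alias_size-sized slot so that it aliases the first slot: mremap with
// old_size == 0 leaves the original mapping intact and maps the same pages
// again at alias_addr.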
static void CreateAliases(uptr start_addr, uptr alias_size, uptr num_aliases) {
  uptr total_size = alias_size * num_aliases;
  uptr mapped = MmapSharedNoReserve(start_addr, total_size);
  CHECK_EQ(mapped, start_addr);

  for (uptr i = 1; i < num_aliases; ++i) {
    uptr alias_addr = start_addr + i * alias_size;
    CHECK_EQ(MremapCreateAlias(start_addr, alias_addr, alias_size), alias_addr);
  }
}

uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
                                uptr num_aliases, uptr ring_buffer_size) {
  CHECK_EQ(alias_size & (alias_size - 1), 0);
  CHECK_EQ(num_aliases & (num_aliases - 1), 0);
  CHECK_EQ(ring_buffer_size & (ring_buffer_size - 1), 0);

  const uptr granularity = GetMmapGranularity();
  shadow_size = RoundUpTo(shadow_size, granularity);
  CHECK_EQ(shadow_size & (shadow_size - 1), 0);

  const uptr alias_region_size = alias_size * num_aliases;
  const uptr alignment =
      2 * Max(Max(shadow_size, alias_region_size), ring_buffer_size);
  const uptr left_padding = ring_buffer_size;

  const uptr right_size = alignment;
  const uptr map_size = left_padding + 2 * alignment;

  const uptr map_start = reinterpret_cast<uptr>(MmapNoAccess(map_size));
  CHECK_NE(map_start, static_cast<uptr>(-1));
  const uptr right_start = RoundUpTo(map_start + left_padding, alignment);

  UnmapFromTo(map_start, right_start - left_padding);
  UnmapFromTo(right_start + right_size, map_start + map_size);

  CreateAliases(right_start + right_size / 2, alias_size, num_aliases);

  return right_start;
}

void InitializePlatformCommonFlags(CommonFlags *cf) {
#  if SANITIZER_ANDROID
  if (&__libc_get_static_tls_bounds == nullptr)
    cf->detect_leaks = false;
#  endif
}

}  // namespace __sanitizer

#endif