//=-- lsan_common.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS

#  if SANITIZER_APPLE
// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L127
#    if SANITIZER_IOS && !SANITIZER_IOSSIM
#      define OBJC_DATA_MASK 0x0000007ffffffff8UL
#    else
#      define OBJC_DATA_MASK 0x00007ffffffffff8UL
#    endif
#  endif

namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject,
// and also to protect the global list of root regions.
static Mutex global_mutex;

void LockGlobal() SANITIZER_ACQUIRE(global_mutex) { global_mutex.Lock(); }
void UnlockGlobal() SANITIZER_RELEASE(global_mutex) { global_mutex.Unlock(); }

Flags lsan_flags;

void DisableCounterUnderflow() {
  if (common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
}

void Flags::SetDefaults() {
#  define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#  include "lsan_flags.inc"
#  undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#  define LSAN_FLAG(Type, Name, DefaultValue, Description) \
    RegisterFlag(parser, #Name, Description, &f->Name);
#  include "lsan_flags.inc"
#  undef LSAN_FLAG
}

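// Illustration only (hypothetical lsan_flags.inc entry): a line such as
//   LSAN_FLAG(bool, use_registers, true, "Scan CPU registers for pointers.")
// expands to `use_registers = true;` inside Flags::SetDefaults() and to
//   RegisterFlag(parser, "use_registers", "Scan CPU registers for pointers.",
//                &f->use_registers);
// inside RegisterLsanFlags(), so each flag is declared in a single place and
// picked up by both the defaults and the parser.
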
#  define LOG_POINTERS(...)      \
    do {                         \
      if (flags()->log_pointers) \
        Report(__VA_ARGS__);     \
    } while (0)

#  define LOG_THREADS(...)      \
    do {                        \
      if (flags()->log_threads) \
        Report(__VA_ARGS__);    \
    } while (0)

class LeakSuppressionContext {
  bool parsed = false;
  SuppressionContext context;
  bool suppressed_stacks_sorted = true;
  InternalMmapVector<u32> suppressed_stacks;
  const LoadedModule *suppress_module = nullptr;

  void LazyInit();
  Suppression *GetSuppressionForAddr(uptr addr);
  bool SuppressInvalid(const StackTrace &stack);
  bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);

 public:
  LeakSuppressionContext(const char *suppression_types[],
                         int suppression_types_num)
      : context(suppression_types, suppression_types_num) {}

  bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size);

  const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
    if (!suppressed_stacks_sorted) {
      suppressed_stacks_sorted = true;
      SortAndDedup(suppressed_stacks);
    }
    return suppressed_stacks;
  }
  void PrintMatchedSuppressions();
};

alignas(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
static LeakSuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = {kSuppressionLeak};
static const char kStdSuppressions[] =
#  if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // definition.
    "leak:*pthread_exit*\n"
#  endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
#  if SANITIZER_APPLE
    // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
    "leak:*_os_trace*\n"
#  endif
    // TLS leak in some glibc versions, described in
    // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
    "leak:*tls_get_addr*\n";

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder)
      LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
}

void LeakSuppressionContext::LazyInit() {
  if (!parsed) {
    parsed = true;
    context.ParseFromFile(flags()->suppressions);
    if (&__lsan_default_suppressions)
      context.Parse(__lsan_default_suppressions());
    context.Parse(kStdSuppressions);
    if (flags()->use_tls && flags()->use_ld_allocations)
      suppress_module = GetLinker();
  }
}

Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  const char *module_name = Symbolizer::GetOrInit()->GetModuleNameForPc(addr);
  if (!module_name)
    module_name = "<unknown module>";
  if (context.Match(module_name, kSuppressionLeak, &s))
    return s;

  // Suppress by file or function name.
  SymbolizedStackHolder symbolized_stack(
      Symbolizer::GetOrInit()->SymbolizePC(addr));
  const SymbolizedStack *frames = symbolized_stack.get();
  for (const SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
        context.Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  return s;
}

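// Example (sketch; the file and symbol names below are made up): with
// LSAN_OPTIONS=suppressions=supp.txt and a supp.txt containing
//   leak:libfoo.so
//   leak:BadCache::Insert
// GetSuppressionForAddr() first tries to match the name of the module
// containing `addr` ("libfoo.so"), and then the function and file name of
// every inlined frame symbolized at `addr` ("BadCache::Insert").
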
static uptr GetCallerPC(const StackTrace &stack) {
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}

#  if SANITIZER_APPLE
// Several pointers in the Objective-C runtime (method cache and class_rw_t,
// for example) are tagged with additional bits we need to strip.
static inline void *TransformPointer(void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return reinterpret_cast<void *>(ptr & OBJC_DATA_MASK);
}
#  endif

// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// covers dynamically allocated TLS blocks, the dynamic loader's internal
// bookkeeping for loaded modules, etc.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by
// intercepting __libc_memalign(). On top of that, there is no easy way to
// reach them. Their addresses are stored in a dynamically allocated array (the
// DTV) which is referenced from the static TLS. Unfortunately, we can't just
// rely on the DTV being reachable from the static TLS, and the dynamic TLS
// being reachable from the DTV. This is because the initial DTV is allocated
// before our interception mechanism kicks in, and thus we don't recognize it
// as allocated memory. We can't special-case it either, since we don't know
// its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
// On all other platforms, this simply checks to ensure that the caller pc is
// valid before reporting chunks as leaked.
bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
  uptr caller_pc = GetCallerPC(stack);
  // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
  // it as reachable, as we can't properly report its allocation stack anyway.
  return !caller_pc ||
         (suppress_module && suppress_module->containsAddress(caller_pc));
}

bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
                                            uptr hit_count, uptr total_size) {
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) {
      s->weight += total_size;
      atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
                                      uptr total_size) {
  LazyInit();
  StackTrace stack = StackDepotGet(stack_trace_id);
  if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
    return false;
  suppressed_stacks_sorted = false;
  suppressed_stacks.push_back(stack_trace_id);
  return true;
}

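// To summarize the flow above: for a leak with stack id `stack_trace_id` seen
// `hit_count` times totalling `total_size` bytes, the stack is fetched from
// the depot and suppressed either because its caller PC is invalid or lies
// inside the dynamic loader (SuppressInvalid), or because one of its frames
// matches a "leak:" rule (SuppressByRule). Suppressed stack ids are remembered
// in `suppressed_stacks` so that later leak-check passes can re-apply them
// without re-symbolizing.
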
static LeakSuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

void InitCommonLsan() {
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() {}
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
};

static inline bool MaybeUserPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible
  // lower bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress)
    return false;
#  if defined(__x86_64__)
  // TODO: support LAM48 and 5 level page tables.
  // LAM_U57 mask format
  //  * top byte: 0x81 because the format is: [0] [6-bit tag] [0]
  //  * top-1 byte: 0xff because it should be 0
  //  * top-2 byte: 0x80 because Linux uses 128 TB VMA ending at 0x7fffffffffff
  constexpr uptr kLAM_U57Mask = 0x81ff80;
  constexpr uptr kPointerMask = kLAM_U57Mask << 40;
  return ((p & kPointerMask) == 0);
#  elif defined(__mips64)
  return ((p >> 40) == 0);
#  elif defined(__aarch64__)
  // TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in
  // address translation and can be used to store a tag.
  constexpr uptr kPointerMask = 255ULL << 48;
  // Accept up to 48 bit VMA.
  return ((p & kPointerMask) == 0);
#  elif defined(__loongarch_lp64)
  // Allow a 47-bit user-space VMA for now.
  return ((p >> 47) == 0);
#  else
  return true;
#  endif
}

namespace {
struct DirectMemoryAccessor {
  void Init(uptr begin, uptr end) {}
  void *LoadPtr(uptr p) const { return *reinterpret_cast<void **>(p); }
};

struct CopyMemoryAccessor {
  void Init(uptr begin, uptr end) {
    this->begin = begin;
    buffer.clear();
    buffer.resize(end - begin);
    MemCpyAccessible(buffer.data(), reinterpret_cast<void *>(begin),
                     buffer.size());
  }

  void *LoadPtr(uptr p) const {
    uptr offset = p - begin;
    CHECK_LE(offset + sizeof(void *), reinterpret_cast<uptr>(buffer.size()));
    return *reinterpret_cast<void **>(offset +
                                      reinterpret_cast<uptr>(buffer.data()));
  }

 private:
  uptr begin;
  InternalMmapVector<char> buffer;
};
}  // namespace

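// The two accessors above satisfy an implicit "memory accessor" concept that
// ScanForPointers() below is templated on; a minimal sketch of the expected
// shape (not a real interface declared anywhere in this file):
//
//   struct Accessor {
//     void Init(uptr begin, uptr end);  // prepare to read [begin, end)
//     void *LoadPtr(uptr p) const;      // read a pointer-sized word at p
//   };
//
// DirectMemoryAccessor reads live memory in place; CopyMemoryAccessor first
// snapshots the range with MemCpyAccessible() so that memory of threads that
// were not suspended can be scanned without faulting.
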
// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
template <class Accessor>
void ScanForPointers(uptr begin, uptr end, Frontier *frontier,
                     const char *region_type, ChunkTag tag,
                     Accessor &accessor) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
               (void *)end);
  accessor.Init(begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {
    void *p = accessor.LoadPtr(pp);
#  if SANITIZER_APPLE
    p = TransformPointer(p);
#  endif
    if (!MaybeUserPointer(reinterpret_cast<uptr>(p)))
      continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk)
      continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin)
      continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored)
      continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
          m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
                 (void *)pp, p, (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  DirectMemoryAccessor accessor;
  ScanForPointers(begin, end, frontier, region_type, tag, accessor);
}

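// Worked example of the scan loop in ScanForPointers() above (illustrative
// addresses only): with pointer_alignment() == 8 and a range [0x1003, 0x101c),
// `pp` is first rounded up to 0x1008, the words at 0x1008 and 0x1010 are
// loaded and tested with MaybeUserPointer()/PointsIntoChunk(), and the loop
// stops before 0x1018 because 0x1018 + sizeof(void *) would read past `end`.
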
// Scans a global range for pointers
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
  uptr allocator_begin = 0, allocator_end = 0;
  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
  if (begin <= allocator_begin && allocator_begin < end) {
    CHECK_LE(allocator_begin, allocator_end);
    CHECK_LE(allocator_end, end);
    if (begin < allocator_begin)
      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                           kReachable);
    if (allocator_end < end)
      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
  } else {
    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
  }
}

template <class Accessor>
void ScanRanges(const InternalMmapVector<Range> &ranges, Frontier *frontier,
                const char *region_type, Accessor &accessor) {
  for (uptr i = 0; i < ranges.size(); i++) {
    ScanForPointers(ranges[i].begin, ranges[i].end, frontier, region_type,
                    kReachable, accessor);
  }
}

void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
                          Frontier *frontier) {
  DirectMemoryAccessor accessor;
  ScanRanges(ranges, frontier, "FAKE STACK", accessor);
}

#  if SANITIZER_FUCHSIA

// Fuchsia handles all threads together with its own callback.
static void ProcessThreads(SuspendedThreadsList const &, Frontier *, tid_t,
                           uptr) {}

#  else

#    if SANITIZER_ANDROID
// FIXME: Move this out into *libcdep.cpp
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
    pid_t, void (*cb)(void *, void *, uptr, void *), void *);
#    endif

static void ProcessThreadRegistry(Frontier *frontier) {
  InternalMmapVector<uptr> ptrs;
  GetAdditionalThreadContextPtrsLocked(&ptrs);

  for (uptr i = 0; i < ptrs.size(); ++i) {
    void *ptr = reinterpret_cast<void *>(ptrs[i]);
    uptr chunk = PointsIntoChunk(ptr);
    if (!chunk)
      continue;
    LsanMetadata m(chunk);
    if (!m.allocated())
      continue;

    // Mark as reachable and add to frontier.
    LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
    m.set_tag(kReachable);
    frontier->push_back(chunk);
  }
}

// Scans thread data (stacks and TLS) for heap pointers.
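// For each thread, ProcessThread() below scans, in order: the captured
// register values (use_registers), the live portion of the stack from the
// captured SP to stack_end together with any extra "fake stack" ranges
// (use_stacks), and the static TLS minus the allocator cache followed by
// dynamic TLS blocks (use_tls).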
template <class Accessor>
static void ProcessThread(tid_t os_id, uptr sp,
                          const InternalMmapVector<uptr> &registers,
                          InternalMmapVector<Range> &extra_ranges,
                          Frontier *frontier, Accessor &accessor) {
  // `extra_ranges` is allocated outside of the function and the loop so that
  // its mapped memory can be reused.
  CHECK(extra_ranges.empty());
  LOG_THREADS("Processing thread %llu.\n", os_id);
  uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
  DTLS *dtls;
  bool thread_found =
      GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
                            &tls_end, &cache_begin, &cache_end, &dtls);
  if (!thread_found) {
    // If a thread can't be found in the thread registry, it's probably in the
    // process of destruction. Log this event and move on.
    LOG_THREADS("Thread %llu not found in registry.\n", os_id);
    return;
  }

  if (!sp)
    sp = stack_begin;

  if (flags()->use_registers) {
    uptr registers_begin = reinterpret_cast<uptr>(registers.data());
    uptr registers_end =
        reinterpret_cast<uptr>(registers.data() + registers.size());
    ScanForPointers(registers_begin, registers_end, frontier, "REGISTERS",
                    kReachable, accessor);
  }

  if (flags()->use_stacks) {
    LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
                (void *)stack_end, (void *)sp);
    if (sp < stack_begin || sp >= stack_end) {
      // SP is outside the recorded stack range (e.g. the thread is running a
      // signal handler on alternate stack, or swapcontext was used).
      // Again, consider the entire stack range to be reachable.
      LOG_THREADS("WARNING: stack pointer not in stack range.\n");
      uptr page_size = GetPageSizeCached();
      int skipped = 0;
      while (stack_begin < stack_end &&
             !IsAccessibleMemoryRange(stack_begin, 1)) {
        skipped++;
        stack_begin += page_size;
      }
      LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n", skipped,
                  (void *)stack_begin, (void *)stack_end);
    } else {
      // Shrink the stack range to ignore out-of-scope values.
      stack_begin = sp;
    }
    ScanForPointers(stack_begin, stack_end, frontier, "STACK", kReachable,
                    accessor);
    GetThreadExtraStackRangesLocked(os_id, &extra_ranges);
    ScanRanges(extra_ranges, frontier, "FAKE STACK", accessor);
  }

  if (flags()->use_tls) {
    if (tls_begin) {
      LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
      // If the TLS and cache ranges don't overlap, scan the full TLS range;
      // otherwise, only scan the non-overlapping portions.
      if (cache_begin == cache_end || tls_end < cache_begin ||
          tls_begin > cache_end) {
        ScanForPointers(tls_begin, tls_end, frontier, "TLS", kReachable,
                        accessor);
      } else {
        if (tls_begin < cache_begin)
          ScanForPointers(tls_begin, cache_begin, frontier, "TLS", kReachable,
                          accessor);
        if (tls_end > cache_end)
          ScanForPointers(cache_end, tls_end, frontier, "TLS", kReachable,
                          accessor);
      }
    }
#    if SANITIZER_ANDROID
    extra_ranges.clear();
    auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_id*/,
                   void *arg) -> void {
      reinterpret_cast<InternalMmapVector<Range> *>(arg)->push_back(
          {reinterpret_cast<uptr>(dtls_begin),
           reinterpret_cast<uptr>(dtls_end)});
    };
    // FIXME: There might be a race-condition here (and in Bionic) if the
    // thread is suspended in the middle of updating its DTLS. In other words,
    // we could scan already freed memory. (This is probably fine for now.)
    __libc_iterate_dynamic_tls(os_id, cb, &extra_ranges);
    ScanRanges(extra_ranges, frontier, "DTLS", accessor);
#    else
    if (dtls && !DTLSInDestruction(dtls)) {
      ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
        uptr dtls_beg = dtv.beg;
        uptr dtls_end = dtls_beg + dtv.size;
        if (dtls_beg < dtls_end) {
          LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
                      (void *)dtls_end);
          ScanForPointers(dtls_beg, dtls_end, frontier, "DTLS", kReachable,
                          accessor);
        }
      });
    } else {
      // We are handling a thread with DTLS under destruction. Log about
      // this and continue.
      LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
    }
#    endif
  }
}

static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier, tid_t caller_tid,
                           uptr caller_sp) {
  InternalMmapVector<tid_t> done_threads;
  InternalMmapVector<uptr> registers;
  InternalMmapVector<Range> extra_ranges;
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    registers.clear();
    extra_ranges.clear();

    const tid_t os_id = suspended_threads.GetThreadID(i);
    uptr sp = 0;
    PtraceRegistersStatus have_registers =
        suspended_threads.GetRegistersAndSP(i, &registers, &sp);
    if (have_registers != REGISTERS_AVAILABLE) {
      VReport(1, "Unable to get registers from thread %llu.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable unless
      // GetRegistersAndSP failed with ESRCH.
      if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
        continue;
      sp = 0;
    }

    if (os_id == caller_tid)
      sp = caller_sp;

    DirectMemoryAccessor accessor;
    ProcessThread(os_id, sp, registers, extra_ranges, frontier, accessor);
    if (flags()->use_detached)
      done_threads.push_back(os_id);
  }

  if (flags()->use_detached) {
    CopyMemoryAccessor accessor;
    InternalMmapVector<tid_t> known_threads;
    GetRunningThreadsLocked(&known_threads);
    Sort(done_threads.data(), done_threads.size());
    for (tid_t os_id : known_threads) {
      registers.clear();
      extra_ranges.clear();

      uptr i = InternalLowerBound(done_threads, os_id);
      if (i >= done_threads.size() || done_threads[i] != os_id) {
        uptr sp = (os_id == caller_tid) ? caller_sp : 0;
        ProcessThread(os_id, sp, registers, extra_ranges, frontier, accessor);
      }
    }
  }

  // Add pointers reachable from ThreadContexts.
  ProcessThreadRegistry(frontier);
}

#  endif  // SANITIZER_FUCHSIA

// A map that contains [region_begin, region_end) pairs.
using RootRegions = DenseMap<detail::DenseMapPair<uptr, uptr>, uptr>;

static RootRegions &GetRootRegionsLocked() {
  global_mutex.CheckLocked();
  static RootRegions *regions = nullptr;
  alignas(RootRegions) static char placeholder[sizeof(RootRegions)];
  if (!regions)
    regions = new (placeholder) RootRegions();
  return *regions;
}

bool HasRootRegions() { return !GetRootRegionsLocked().empty(); }

void ScanRootRegions(Frontier *frontier,
                     const InternalMmapVectorNoCtor<Region> &mapped_regions) {
  if (!flags()->use_root_regions)
    return;

  InternalMmapVector<Region> regions;
  GetRootRegionsLocked().forEach([&](const auto &kv) {
    regions.push_back({kv.first.first, kv.first.second});
    return true;
  });

  InternalMmapVector<Region> intersection;
  Intersect(mapped_regions, regions, intersection);

  for (const Region &r : intersection) {
    LOG_POINTERS("Root region intersects with mapped region at %p-%p\n",
                 (void *)r.begin, (void *)r.end);
    ScanRangeForPointers(r.begin, r.end, frontier, "ROOT", kReachable);
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions || !HasRootRegions())
    return;
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  MemoryMappedSegment segment;
  InternalMmapVector<Region> mapped_regions;
  while (proc_maps.Next(&segment))
    if (segment.IsReadable())
      mapped_regions.push_back({segment.start, segment.end});
  ScanRootRegions(frontier, mapped_regions);
}

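// Note on the two root-region entry points above: regions are registered and
// reference-counted through __lsan_register_root_region() /
// __lsan_unregister_root_region() (defined at the bottom of this file), and at
// check time only the readable, currently mapped parts of each registered
// region are actually scanned.
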
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

static void IgnoredSuppressedCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated() || m.tag() == kIgnored)
    return;

  const InternalMmapVector<u32> &suppressed =
      *static_cast<const InternalMmapVector<u32> *>(arg);
  uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
  if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
    return;

  LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
               (void *)(chunk + m.requested_size()), m.requested_size());
  m.set_tag(kIgnored);
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

// Sets the appropriate tag on each chunk.
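// The classification proceeds in phases: chunks whose allocation stacks were
// previously suppressed are tagged kIgnored, explicitly ignored chunks are
// seeded into the frontier, globals, thread stacks/TLS and root regions are
// scanned, and the reachable set is flood-filled. Platform-specific
// allocations are then handled in a separate pass with its own flood fill so
// their relatively expensive check is skipped for chunks already proven
// reachable. Finally, unreachable chunks pointed to by other leaked chunks are
// downgraded from kDirectlyLeaked to kIndirectlyLeaked.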
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
                              Frontier *frontier, tid_t caller_tid,
                              uptr caller_sp) {
  const InternalMmapVector<u32> &suppressed_stacks =
      GetSuppressionContext()->GetSortedSuppressedStacks();
  if (!suppressed_stacks.empty()) {
    ForEachChunk(IgnoredSuppressedCb,
                 const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
  }
  ForEachChunk(CollectIgnoredCb, frontier);
  ProcessGlobalRegions(frontier);
  ProcessThreads(suspended_threads, frontier, caller_tid, caller_sp);
  ProcessRootRegions(frontier);
  FloodFillTag(frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(frontier);
  FloodFillTag(frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated())
    return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
    leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
}

void LeakSuppressionContext::PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched;
  context.GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++) {
    Printf("%7zu %10zu %s\n",
           static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
           matched[i]->weight, matched[i]->templ);
  }
  Printf("%s\n\n", line);
}

#  if SANITIZER_FUCHSIA

// Fuchsia provides a libc interface that guarantees all threads are
// covered, and SuspendedThreadList is never really used.
static bool ReportUnsuspendedThreads(const SuspendedThreadsList &) {
  return true;
}

#  else  // !SANITIZER_FUCHSIA

static bool ReportUnsuspendedThreads(
    const SuspendedThreadsList &suspended_threads) {
  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
    threads[i] = suspended_threads.GetThreadID(i);

  Sort(threads.data(), threads.size());

  InternalMmapVector<tid_t> known_threads;
  GetRunningThreadsLocked(&known_threads);

  bool succeeded = true;
  for (auto os_id : known_threads) {
    uptr i = InternalLowerBound(threads, os_id);
    if (i >= threads.size() || threads[i] != os_id) {
      succeeded = false;
      Report(
          "Running thread %zu was not suspended. False leaks are possible.\n",
          os_id);
    }
  }
  return succeeded;
}

#  endif  // !SANITIZER_FUCHSIA

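// As implemented in CheckForLeaksCallback() below, the thread_suspend_fail
// flag controls what happens when some running thread could not be suspended:
// 0 abandons the scan for this run (no leaks are reported), 1 scans anyway and
// accepts possible false leaks, and 2 returns without marking success, which
// CheckForLeaksOnce() treats as a fatal error.
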
static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  if (!ReportUnsuspendedThreads(suspended_threads)) {
    switch (flags()->thread_suspend_fail) {
      case 0:
        param->success = true;
        return;
      case 1:
        break;
      case 2:
        // Will crash on return.
        return;
    }
  }
  ClassifyAllChunks(suspended_threads, &param->frontier, param->caller_tid,
                    param->caller_sp);
  ForEachChunk(CollectLeaksCb, &param->leaks);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}

static bool PrintResults(LeakReport &report) {
  uptr unsuppressed_count = report.UnsuppressedLeakCount();
  if (unsuppressed_count) {
    Decorator d;
    Printf(
        "\n"
        "================================================================="
        "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.Default());
    report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    GetSuppressionContext()->PrintMatchedSuppressions();
  if (unsuppressed_count)
    report.PrintSummary();
  if ((unsuppressed_count && common_flags()->verbosity >= 2) ||
      flags()->log_threads)
    PrintThreads();
  return unsuppressed_count;
}

static bool CheckForLeaksOnce() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off()) {
    VReport(1, "LeakSanitizer is disabled\n");
    return false;
  }
  VReport(1, "LeakSanitizer: checking for leaks\n");
  // Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
  // suppressions. However, if a stack id was previously suppressed, it should
  // be suppressed in future checks as well.
  for (int i = 0;; ++i) {
    EnsureMainThreadIDIsCorrect();
    CheckForLeaksParam param;
    // Capture the calling thread's stack pointer early to avoid false
    // negatives: frames pushed below this point by the leak checker may only
    // partially overwrite older frames, and stale pointers surviving in the
    // not-yet-overwritten bytes could otherwise make leaked chunks appear
    // reachable once the threads are suspended and stack pointers captured.
    param.caller_tid = GetTid();
    param.caller_sp = reinterpret_cast<uptr>(__builtin_frame_address(0));
    LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
    if (!param.success) {
      Report("LeakSanitizer has encountered a fatal error.\n");
      Report(
          "HINT: For debugging, try setting environment variable "
          "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
      Report(
          "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
          "etc)\n");
      Die();
    }
    LeakReport leak_report;
    leak_report.AddLeakedChunks(param.leaks);

    // No new suppressed stacks, so a rerun will not help and we can report.
    if (!leak_report.ApplySuppressions())
      return PrintResults(leak_report);

    // No indirect leaks to report, so we are done here.
    if (!leak_report.IndirectUnsuppressedLeakCount())
      return PrintResults(leak_report);

    if (i >= 8) {
      Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
      return PrintResults(leak_report);
    }

    // We found a new, previously unseen suppressed call stack. Rerun to make
    // sure it does not hold indirect leaks.
    VReport(1, "Rerun with %zu suppressed stacks.",
            GetSuppressionContext()->GetSortedSuppressedStacks().size());
  }
}

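// Runs the full leak check flags()->tries times; leaks are treated as
// "reported" (triggering HandleLeaks() below) only if every pass found
// unsuppressed leaks.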
static bool CheckForLeaks() {
  int leaking_tries = 0;
  for (int i = 0; i < flags()->tries; ++i) leaking_tries += CheckForLeaksOnce();
  return leaking_tries == flags()->tries;
}

static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }

void DoLeakCheck() {
  Lock l(&global_mutex);
  static bool already_done;
  if (already_done)
    return;
  already_done = true;
  has_reported_leaks = CheckForLeaks();
  if (has_reported_leaks)
    HandleLeaks();
}

static int DoRecoverableLeakCheck() {
  Lock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunks(). We don't expect to ever see this many
// leaks in real-world applications.
// FIXME: Get rid of this limit by moving logic into DedupLeaks.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
  for (const LeakedChunk &leak : chunks) {
    uptr chunk = leak.chunk;
    u32 stack_trace_id = leak.stack_trace_id;
    uptr leaked_size = leak.leaked_size;
    ChunkTag tag = leak.tag;
    CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);

    if (u32 resolution = flags()->resolution) {
      StackTrace stack = StackDepotGet(stack_trace_id);
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    }

    bool is_directly_leaked = (tag == kDirectlyLeaked);
    uptr i;
    for (i = 0; i < leaks_.size(); i++) {
      if (leaks_[i].stack_trace_id == stack_trace_id &&
          leaks_[i].is_directly_leaked == is_directly_leaked) {
        leaks_[i].hit_count++;
        leaks_[i].total_size += leaked_size;
        break;
      }
    }
    if (i == leaks_.size()) {
      if (leaks_.size() == kMaxLeaksConsidered)
        return;
      Leak leak = {next_id_++, /* hit_count */ 1,
                   leaked_size, stack_trace_id,
                   is_directly_leaked, /* is_suppressed */ false};
      leaks_.push_back(leak);
    }
    if (flags()->report_objects) {
      LeakedObject obj = {leaks_[i].id, GetUserAddr(chunk), leaked_size};
      leaked_objects_.push_back(obj);
    }
  }
}

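// Example of the effect of the `resolution` flag above (hypothetical value):
// with LSAN_OPTIONS=resolution=3, every allocation stack is truncated to its
// top 3 frames before being re-interned in the stack depot, so leaks sharing
// the same 3-frame prefix are merged into a single entry with a combined
// hit_count and total_size.
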
static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf(
        "Too many leaks! Only the first %zu leaks encountered will be "
        "reported.\n",
        kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report)
      break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.Default());

  CHECK(leaks_[index].stack_trace_id);
  StackDepotGet(leaks_[index].stack_trace_id).Print();

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary;
  summary.AppendF("%zu byte(s) leaked in %zu allocation(s).", bytes,
                  allocations);
  ReportErrorSummary(summary.data());
}

uptr LeakReport::ApplySuppressions() {
  LeakSuppressionContext *suppressions = GetSuppressionContext();
  uptr new_suppressions = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
                               leaks_[i].total_size)) {
      leaks_[i].is_suppressed = true;
      ++new_suppressions;
    }
  }
  return new_suppressions;
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed)
      result++;
  return result;
}

uptr LeakReport::IndirectUnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
      result++;
  return result;
}

}  // namespace __lsan
#else   // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() {}
void DoLeakCheck() {}
void DoRecoverableLeakCheckVoid() {}
void DisableInThisThread() {}
void EnableInThisThread() {}
}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is
  // not locked.
  Lock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObject(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1,
            "__lsan_ignore_object(): "
            "heap object at %p is already being ignored\n",
            p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  VReport(1, "Registered root region at %p of size %zu\n", begin, size);
  uptr b = reinterpret_cast<uptr>(begin);
  uptr e = b + size;
  CHECK_LT(b, e);

  Lock l(&global_mutex);
  ++GetRootRegionsLocked()[{b, e}];
#endif  // CAN_SANITIZE_LEAKS
}

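// Usage sketch (illustrative only; names are made up): memory obtained
// directly from mmap() is not scanned by default, so a client that stashes
// heap pointers there can pin the mapping as a root region:
//
//   void *arena = mmap(nullptr, 1 << 20, PROT_READ | PROT_WRITE,
//                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
//   __lsan_register_root_region(arena, 1 << 20);
//   ...
//   __lsan_unregister_root_region(arena, 1 << 20);
//
// Registrations are reference-counted per (begin, size) pair, so identical
// regions can be registered and unregistered multiple times.
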
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  uptr b = reinterpret_cast<uptr>(begin);
  uptr e = b + size;
  CHECK_LT(b, e);
  VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);

  {
    Lock l(&global_mutex);
    if (auto *f = GetRootRegionsLocked().find({b, e})) {
      if (--(f->second) == 0)
        GetRootRegionsLocked().erase(f);
      return;
    }
  }
  Report(
      "__lsan_unregister_root_region(): region at %p of size %zu has not "
      "been registered.\n",
      begin, size);
  Die();
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
  return 0;
}

SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
  return "";
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_WEAK_DEF(int, __lsan_is_turned_off, void) {
  return 0;
}

SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_suppressions, void) {
  return "";
}
#endif
}  // extern "C"