xref: /freebsd-src/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp (revision 0fca6ea1d4eea4c934cfff25ac9ee8ad6fe95583)
168d75effSDimitry Andric //=-- lsan_common.cpp -----------------------------------------------------===//
268d75effSDimitry Andric //
368d75effSDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
468d75effSDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
568d75effSDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
668d75effSDimitry Andric //
768d75effSDimitry Andric //===----------------------------------------------------------------------===//
868d75effSDimitry Andric //
968d75effSDimitry Andric // This file is a part of LeakSanitizer.
1068d75effSDimitry Andric // Implementation of common leak checking functionality.
1168d75effSDimitry Andric //
1268d75effSDimitry Andric //===----------------------------------------------------------------------===//
1368d75effSDimitry Andric 
1468d75effSDimitry Andric #include "lsan_common.h"
1568d75effSDimitry Andric 
1668d75effSDimitry Andric #include "sanitizer_common/sanitizer_common.h"
1768d75effSDimitry Andric #include "sanitizer_common/sanitizer_flag_parser.h"
1868d75effSDimitry Andric #include "sanitizer_common/sanitizer_flags.h"
1968d75effSDimitry Andric #include "sanitizer_common/sanitizer_placement_new.h"
2068d75effSDimitry Andric #include "sanitizer_common/sanitizer_procmaps.h"
2168d75effSDimitry Andric #include "sanitizer_common/sanitizer_report_decorator.h"
2268d75effSDimitry Andric #include "sanitizer_common/sanitizer_stackdepot.h"
2368d75effSDimitry Andric #include "sanitizer_common/sanitizer_stacktrace.h"
2468d75effSDimitry Andric #include "sanitizer_common/sanitizer_suppressions.h"
2568d75effSDimitry Andric #include "sanitizer_common/sanitizer_thread_registry.h"
2668d75effSDimitry Andric #include "sanitizer_common/sanitizer_tls_get_addr.h"
2768d75effSDimitry Andric 
2868d75effSDimitry Andric #if CAN_SANITIZE_LEAKS
29bdd1243dSDimitry Andric 
30bdd1243dSDimitry Andric #  if SANITIZER_APPLE
31bdd1243dSDimitry Andric // https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L127
32bdd1243dSDimitry Andric #    if SANITIZER_IOS && !SANITIZER_IOSSIM
33bdd1243dSDimitry Andric #      define OBJC_DATA_MASK 0x0000007ffffffff8UL
34bdd1243dSDimitry Andric #    else
35bdd1243dSDimitry Andric #      define OBJC_DATA_MASK 0x00007ffffffffff8UL
36bdd1243dSDimitry Andric #    endif
37bdd1243dSDimitry Andric #  endif
38bdd1243dSDimitry Andric 
3968d75effSDimitry Andric namespace __lsan {
4068d75effSDimitry Andric 
4168d75effSDimitry Andric // This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
4268d75effSDimitry Andric // also to protect the global list of root regions.
4306c3fb27SDimitry Andric static Mutex global_mutex;
4468d75effSDimitry Andric 
455f757f3fSDimitry Andric void LockGlobal() SANITIZER_ACQUIRE(global_mutex) { global_mutex.Lock(); }
465f757f3fSDimitry Andric void UnlockGlobal() SANITIZER_RELEASE(global_mutex) { global_mutex.Unlock(); }
475f757f3fSDimitry Andric 
4868d75effSDimitry Andric Flags lsan_flags;
4968d75effSDimitry Andric 
5068d75effSDimitry Andric void DisableCounterUnderflow() {
5168d75effSDimitry Andric   if (common_flags()->detect_leaks) {
5268d75effSDimitry Andric     Report("Unmatched call to __lsan_enable().\n");
5368d75effSDimitry Andric     Die();
5468d75effSDimitry Andric   }
5568d75effSDimitry Andric }
5668d75effSDimitry Andric 
5768d75effSDimitry Andric void Flags::SetDefaults() {
5868d75effSDimitry Andric #  define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
5968d75effSDimitry Andric #  include "lsan_flags.inc"
6068d75effSDimitry Andric #  undef LSAN_FLAG
6168d75effSDimitry Andric }
6268d75effSDimitry Andric 
6368d75effSDimitry Andric void RegisterLsanFlags(FlagParser *parser, Flags *f) {
6468d75effSDimitry Andric #  define LSAN_FLAG(Type, Name, DefaultValue, Description) \
6568d75effSDimitry Andric     RegisterFlag(parser, #Name, Description, &f->Name);
6668d75effSDimitry Andric #  include "lsan_flags.inc"
6768d75effSDimitry Andric #  undef LSAN_FLAG
6868d75effSDimitry Andric }
6968d75effSDimitry Andric 
7068d75effSDimitry Andric #  define LOG_POINTERS(...)      \
7168d75effSDimitry Andric     do {                         \
720eae32dcSDimitry Andric       if (flags()->log_pointers) \
730eae32dcSDimitry Andric         Report(__VA_ARGS__);     \
7468d75effSDimitry Andric     } while (0)
7568d75effSDimitry Andric 
7668d75effSDimitry Andric #  define LOG_THREADS(...)      \
7768d75effSDimitry Andric     do {                        \
780eae32dcSDimitry Andric       if (flags()->log_threads) \
790eae32dcSDimitry Andric         Report(__VA_ARGS__);    \
8068d75effSDimitry Andric     } while (0)
8168d75effSDimitry Andric 
82e8d8bef9SDimitry Andric class LeakSuppressionContext {
83e8d8bef9SDimitry Andric   bool parsed = false;
84e8d8bef9SDimitry Andric   SuppressionContext context;
85e8d8bef9SDimitry Andric   bool suppressed_stacks_sorted = true;
86e8d8bef9SDimitry Andric   InternalMmapVector<u32> suppressed_stacks;
870eae32dcSDimitry Andric   const LoadedModule *suppress_module = nullptr;
88e8d8bef9SDimitry Andric 
89e8d8bef9SDimitry Andric   void LazyInit();
900eae32dcSDimitry Andric   Suppression *GetSuppressionForAddr(uptr addr);
910eae32dcSDimitry Andric   bool SuppressInvalid(const StackTrace &stack);
920eae32dcSDimitry Andric   bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);
93e8d8bef9SDimitry Andric 
94e8d8bef9SDimitry Andric  public:
95e8d8bef9SDimitry Andric   LeakSuppressionContext(const char *suppression_types[],
96e8d8bef9SDimitry Andric                          int suppression_types_num)
97e8d8bef9SDimitry Andric       : context(suppression_types, suppression_types_num) {}
98e8d8bef9SDimitry Andric 
990eae32dcSDimitry Andric   bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size);
100e8d8bef9SDimitry Andric 
101e8d8bef9SDimitry Andric   const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
102e8d8bef9SDimitry Andric     if (!suppressed_stacks_sorted) {
103e8d8bef9SDimitry Andric       suppressed_stacks_sorted = true;
104e8d8bef9SDimitry Andric       SortAndDedup(suppressed_stacks);
105e8d8bef9SDimitry Andric     }
106e8d8bef9SDimitry Andric     return suppressed_stacks;
107e8d8bef9SDimitry Andric   }
108e8d8bef9SDimitry Andric   void PrintMatchedSuppressions();
109e8d8bef9SDimitry Andric };
110e8d8bef9SDimitry Andric 
111*0fca6ea1SDimitry Andric alignas(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
112e8d8bef9SDimitry Andric static LeakSuppressionContext *suppression_ctx = nullptr;
11368d75effSDimitry Andric static const char kSuppressionLeak[] = "leak";
11468d75effSDimitry Andric static const char *kSuppressionTypes[] = {kSuppressionLeak};
11568d75effSDimitry Andric static const char kStdSuppressions[] =
11668d75effSDimitry Andric #  if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
11768d75effSDimitry Andric     // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
11868d75effSDimitry Andric     // definition.
11968d75effSDimitry Andric     "leak:*pthread_exit*\n"
12068d75effSDimitry Andric #  endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
12181ad6265SDimitry Andric #  if SANITIZER_APPLE
12268d75effSDimitry Andric     // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
12368d75effSDimitry Andric     "leak:*_os_trace*\n"
12468d75effSDimitry Andric #  endif
12568d75effSDimitry Andric     // TLS leak in some glibc versions, described in
12668d75effSDimitry Andric     // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
12768d75effSDimitry Andric     "leak:*tls_get_addr*\n";
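// User-supplied suppression files (passed via the suppressions= flag, e.g.
// LSAN_OPTIONS=suppressions=<path>) use the same "leak:<glob>" line format as
// the built-in rules above.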
12868d75effSDimitry Andric 
12968d75effSDimitry Andric void InitializeSuppressions() {
13068d75effSDimitry Andric   CHECK_EQ(nullptr, suppression_ctx);
13168d75effSDimitry Andric   suppression_ctx = new (suppression_placeholder)
132e8d8bef9SDimitry Andric       LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
13368d75effSDimitry Andric }
13468d75effSDimitry Andric 
135e8d8bef9SDimitry Andric void LeakSuppressionContext::LazyInit() {
136e8d8bef9SDimitry Andric   if (!parsed) {
137e8d8bef9SDimitry Andric     parsed = true;
138e8d8bef9SDimitry Andric     context.ParseFromFile(flags()->suppressions);
139e8d8bef9SDimitry Andric     if (&__lsan_default_suppressions)
140e8d8bef9SDimitry Andric       context.Parse(__lsan_default_suppressions());
141e8d8bef9SDimitry Andric     context.Parse(kStdSuppressions);
1420eae32dcSDimitry Andric     if (flags()->use_tls && flags()->use_ld_allocations)
1430eae32dcSDimitry Andric       suppress_module = GetLinker();
144e8d8bef9SDimitry Andric   }
145e8d8bef9SDimitry Andric }
146e8d8bef9SDimitry Andric 
1470eae32dcSDimitry Andric Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
1480eae32dcSDimitry Andric   Suppression *s = nullptr;
1490eae32dcSDimitry Andric 
1500eae32dcSDimitry Andric   // Suppress by module name.
15181ad6265SDimitry Andric   const char *module_name = Symbolizer::GetOrInit()->GetModuleNameForPc(addr);
15281ad6265SDimitry Andric   if (!module_name)
15381ad6265SDimitry Andric     module_name = "<unknown module>";
1540eae32dcSDimitry Andric   if (context.Match(module_name, kSuppressionLeak, &s))
1550eae32dcSDimitry Andric     return s;
1560eae32dcSDimitry Andric 
1570eae32dcSDimitry Andric   // Suppress by file or function name.
1581db9f3b2SDimitry Andric   SymbolizedStackHolder symbolized_stack(
1591db9f3b2SDimitry Andric       Symbolizer::GetOrInit()->SymbolizePC(addr));
1601db9f3b2SDimitry Andric   const SymbolizedStack *frames = symbolized_stack.get();
1611db9f3b2SDimitry Andric   for (const SymbolizedStack *cur = frames; cur; cur = cur->next) {
1620eae32dcSDimitry Andric     if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
1630eae32dcSDimitry Andric         context.Match(cur->info.file, kSuppressionLeak, &s)) {
1640eae32dcSDimitry Andric       break;
1650eae32dcSDimitry Andric     }
1660eae32dcSDimitry Andric   }
1670eae32dcSDimitry Andric   return s;
1680eae32dcSDimitry Andric }
1690eae32dcSDimitry Andric 
1700eae32dcSDimitry Andric static uptr GetCallerPC(const StackTrace &stack) {
1710eae32dcSDimitry Andric   // The top frame is our malloc/calloc/etc. The next frame is the caller.
1720eae32dcSDimitry Andric   if (stack.size >= 2)
1730eae32dcSDimitry Andric     return stack.trace[1];
1740eae32dcSDimitry Andric   return 0;
1750eae32dcSDimitry Andric }
1760eae32dcSDimitry Andric 
177bdd1243dSDimitry Andric #  if SANITIZER_APPLE
17806c3fb27SDimitry Andric // Several pointers in the Objective-C runtime (method cache and class_rw_t,
17906c3fb27SDimitry Andric // for example) are tagged with additional bits we need to strip.
18006c3fb27SDimitry Andric static inline void *TransformPointer(void *p) {
181bdd1243dSDimitry Andric   uptr ptr = reinterpret_cast<uptr>(p);
18206c3fb27SDimitry Andric   return reinterpret_cast<void *>(ptr & OBJC_DATA_MASK);
183bdd1243dSDimitry Andric }
184bdd1243dSDimitry Andric #  endif
185bdd1243dSDimitry Andric 
1860eae32dcSDimitry Andric // On Linux, treats all chunks allocated from ld-linux.so as reachable. This
1870eae32dcSDimitry Andric // covers dynamically allocated TLS blocks, the dynamic loader's internal
1880eae32dcSDimitry Andric // bookkeeping for loaded modules, and the like.
1890eae32dcSDimitry Andric // Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
1900eae32dcSDimitry Andric // They are allocated with a __libc_memalign() call in allocate_and_init()
1910eae32dcSDimitry Andric // (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
1920eae32dcSDimitry Andric // blocks, but we can make sure they come from our own allocator by intercepting
1930eae32dcSDimitry Andric // __libc_memalign(). On top of that, there is no easy way to reach them. Their
1940eae32dcSDimitry Andric // addresses are stored in a dynamically allocated array (the DTV) which is
1950eae32dcSDimitry Andric // referenced from the static TLS. Unfortunately, we can't just rely on the DTV
1960eae32dcSDimitry Andric // being reachable from the static TLS, and the dynamic TLS being reachable from
1970eae32dcSDimitry Andric // the DTV. This is because the initial DTV is allocated before our interception
1980eae32dcSDimitry Andric // mechanism kicks in, and thus we don't recognize it as allocated memory. We
1990eae32dcSDimitry Andric // can't special-case it either, since we don't know its size.
2000eae32dcSDimitry Andric // Our solution is to include in the root set all allocations made from
2010eae32dcSDimitry Andric // ld-linux.so (which is where allocate_and_init() is implemented). This is
2020eae32dcSDimitry Andric // guaranteed to include all dynamic TLS blocks (and possibly other allocations
2030eae32dcSDimitry Andric // which we don't care about).
2040eae32dcSDimitry Andric // On all other platforms, this simply checks to ensure that the caller pc is
2050eae32dcSDimitry Andric // valid before reporting chunks as leaked.
2060eae32dcSDimitry Andric bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
2070eae32dcSDimitry Andric   uptr caller_pc = GetCallerPC(stack);
2080eae32dcSDimitry Andric   // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
2090eae32dcSDimitry Andric   // it as reachable, as we can't properly report its allocation stack anyway.
2100eae32dcSDimitry Andric   return !caller_pc ||
2110eae32dcSDimitry Andric          (suppress_module && suppress_module->containsAddress(caller_pc));
2120eae32dcSDimitry Andric }
2130eae32dcSDimitry Andric 
2140eae32dcSDimitry Andric bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
2150eae32dcSDimitry Andric                                             uptr hit_count, uptr total_size) {
2160eae32dcSDimitry Andric   for (uptr i = 0; i < stack.size; i++) {
2170eae32dcSDimitry Andric     Suppression *s = GetSuppressionForAddr(
2180eae32dcSDimitry Andric         StackTrace::GetPreviousInstructionPc(stack.trace[i]));
2190eae32dcSDimitry Andric     if (s) {
2200eae32dcSDimitry Andric       s->weight += total_size;
2210eae32dcSDimitry Andric       atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
2220eae32dcSDimitry Andric       return true;
2230eae32dcSDimitry Andric     }
2240eae32dcSDimitry Andric   }
2250eae32dcSDimitry Andric   return false;
2260eae32dcSDimitry Andric }
2270eae32dcSDimitry Andric 
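// Returns true if the leak with the given allocation stack should be
// suppressed, either because its caller PC is unknown or falls inside the
// dynamic linker (SuppressInvalid), or because a suppression rule matches
// (SuppressByRule). Suppressed stack ids are recorded so that
// GetSortedSuppressedStacks() can pre-mark matching chunks in later passes.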
2280eae32dcSDimitry Andric bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
2290eae32dcSDimitry Andric                                       uptr total_size) {
2300eae32dcSDimitry Andric   LazyInit();
2310eae32dcSDimitry Andric   StackTrace stack = StackDepotGet(stack_trace_id);
2320eae32dcSDimitry Andric   if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
2330eae32dcSDimitry Andric     return false;
2340eae32dcSDimitry Andric   suppressed_stacks_sorted = false;
2350eae32dcSDimitry Andric   suppressed_stacks.push_back(stack_trace_id);
2360eae32dcSDimitry Andric   return true;
2370eae32dcSDimitry Andric }
2380eae32dcSDimitry Andric 
239e8d8bef9SDimitry Andric static LeakSuppressionContext *GetSuppressionContext() {
24068d75effSDimitry Andric   CHECK(suppression_ctx);
24168d75effSDimitry Andric   return suppression_ctx;
24268d75effSDimitry Andric }
24368d75effSDimitry Andric 
24468d75effSDimitry Andric void InitCommonLsan() {
24568d75effSDimitry Andric   if (common_flags()->detect_leaks) {
24668d75effSDimitry Andric     // Initialization which can fail or print warnings should only be done if
24768d75effSDimitry Andric     // LSan is actually enabled.
24868d75effSDimitry Andric     InitializeSuppressions();
24968d75effSDimitry Andric     InitializePlatformSpecificModules();
25068d75effSDimitry Andric   }
25168d75effSDimitry Andric }
25268d75effSDimitry Andric 
25368d75effSDimitry Andric class Decorator : public __sanitizer::SanitizerCommonDecorator {
25468d75effSDimitry Andric  public:
25568d75effSDimitry Andric   Decorator() : SanitizerCommonDecorator() {}
25668d75effSDimitry Andric   const char *Error() { return Red(); }
25768d75effSDimitry Andric   const char *Leak() { return Blue(); }
25868d75effSDimitry Andric };
25968d75effSDimitry Andric 
26081ad6265SDimitry Andric static inline bool MaybeUserPointer(uptr p) {
26168d75effSDimitry Andric   // Since our heap is located in mmap-ed memory, we can assume a sensible lower
26268d75effSDimitry Andric   // bound on heap addresses.
26368d75effSDimitry Andric   const uptr kMinAddress = 4 * 4096;
2640eae32dcSDimitry Andric   if (p < kMinAddress)
2650eae32dcSDimitry Andric     return false;
26668d75effSDimitry Andric #  if defined(__x86_64__)
26706c3fb27SDimitry Andric   // TODO: support LAM48 and 5 level page tables.
26806c3fb27SDimitry Andric   // LAM_U57 mask format
26906c3fb27SDimitry Andric   //  * top byte: 0x81 because the format is: [0] [6-bit tag] [0]
27006c3fb27SDimitry Andric   //  * top-1 byte: 0xff because it should be 0
27106c3fb27SDimitry Andric   //  * top-2 byte: 0x80 because Linux uses 128 TB VMA ending at 0x7fffffffffff
27206c3fb27SDimitry Andric   constexpr uptr kLAM_U57Mask = 0x81ff80;
27306c3fb27SDimitry Andric   constexpr uptr kPointerMask = kLAM_U57Mask << 40;
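  // With the shift applied, kPointerMask == 0x81ff800000000000. A canonical
  // user-space pointer (at most 0x7fffffffffff) has all of these bits clear.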
27406c3fb27SDimitry Andric   return ((p & kPointerMask) == 0);
27568d75effSDimitry Andric #  elif defined(__mips64)
27668d75effSDimitry Andric   return ((p >> 40) == 0);
27768d75effSDimitry Andric #  elif defined(__aarch64__)
27806c3fb27SDimitry Andric   // TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in
27906c3fb27SDimitry Andric   // address translation and can be used to store a tag.
28006c3fb27SDimitry Andric   constexpr uptr kPointerMask = 255ULL << 48;
28181ad6265SDimitry Andric   // Accept up to 48 bit VMA.
28206c3fb27SDimitry Andric   return ((p & kPointerMask) == 0);
283bdd1243dSDimitry Andric #  elif defined(__loongarch_lp64)
284bdd1243dSDimitry Andric   // Allow 47-bit user-space VMA at current.
285bdd1243dSDimitry Andric   return ((p >> 47) == 0);
28668d75effSDimitry Andric #  else
28768d75effSDimitry Andric   return true;
28868d75effSDimitry Andric #  endif
28968d75effSDimitry Andric }
29068d75effSDimitry Andric 
29168d75effSDimitry Andric // Scans the memory range, looking for byte patterns that point into allocator
29268d75effSDimitry Andric // chunks. Marks those chunks with |tag| and adds them to |frontier|.
29368d75effSDimitry Andric // There are two usage modes for this function: finding reachable chunks
29468d75effSDimitry Andric // (|tag| = kReachable) and finding indirectly leaked chunks
29568d75effSDimitry Andric // (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
29668d75effSDimitry Andric // so |frontier| = 0.
2970eae32dcSDimitry Andric void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
29868d75effSDimitry Andric                           const char *region_type, ChunkTag tag) {
29968d75effSDimitry Andric   CHECK(tag == kReachable || tag == kIndirectlyLeaked);
30068d75effSDimitry Andric   const uptr alignment = flags()->pointer_alignment();
301349cc55cSDimitry Andric   LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
302349cc55cSDimitry Andric                (void *)end);
30368d75effSDimitry Andric   uptr pp = begin;
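  // Round the scan cursor up to the configured pointer alignment before
  // walking the range one word at a time.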
30468d75effSDimitry Andric   if (pp % alignment)
30568d75effSDimitry Andric     pp = pp + alignment - pp % alignment;
30668d75effSDimitry Andric   for (; pp + sizeof(void *) <= end; pp += alignment) {
30768d75effSDimitry Andric     void *p = *reinterpret_cast<void **>(pp);
308bdd1243dSDimitry Andric #  if SANITIZER_APPLE
30906c3fb27SDimitry Andric     p = TransformPointer(p);
310bdd1243dSDimitry Andric #  endif
31181ad6265SDimitry Andric     if (!MaybeUserPointer(reinterpret_cast<uptr>(p)))
3120eae32dcSDimitry Andric       continue;
31368d75effSDimitry Andric     uptr chunk = PointsIntoChunk(p);
3140eae32dcSDimitry Andric     if (!chunk)
3150eae32dcSDimitry Andric       continue;
31668d75effSDimitry Andric     // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
3170eae32dcSDimitry Andric     if (chunk == begin)
3180eae32dcSDimitry Andric       continue;
31968d75effSDimitry Andric     LsanMetadata m(chunk);
3200eae32dcSDimitry Andric     if (m.tag() == kReachable || m.tag() == kIgnored)
3210eae32dcSDimitry Andric       continue;
32268d75effSDimitry Andric 
32368d75effSDimitry Andric     // Do this check relatively late so we can log only the interesting cases.
32468d75effSDimitry Andric     if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
32568d75effSDimitry Andric       LOG_POINTERS(
32668d75effSDimitry Andric           "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
32768d75effSDimitry Andric           "%zu.\n",
328349cc55cSDimitry Andric           (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
329349cc55cSDimitry Andric           m.requested_size());
33068d75effSDimitry Andric       continue;
33168d75effSDimitry Andric     }
33268d75effSDimitry Andric 
33368d75effSDimitry Andric     m.set_tag(tag);
334349cc55cSDimitry Andric     LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
335349cc55cSDimitry Andric                  (void *)pp, p, (void *)chunk,
336349cc55cSDimitry Andric                  (void *)(chunk + m.requested_size()), m.requested_size());
33768d75effSDimitry Andric     if (frontier)
33868d75effSDimitry Andric       frontier->push_back(chunk);
33968d75effSDimitry Andric   }
34068d75effSDimitry Andric }
34168d75effSDimitry Andric 
34268d75effSDimitry Andric // Scans a global range for pointers
34368d75effSDimitry Andric void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
34468d75effSDimitry Andric   uptr allocator_begin = 0, allocator_end = 0;
34568d75effSDimitry Andric   GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
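  // Carve the allocator's own global object out of the scanned range,
  // presumably so its internal bookkeeping is not treated as extra roots.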
34668d75effSDimitry Andric   if (begin <= allocator_begin && allocator_begin < end) {
34768d75effSDimitry Andric     CHECK_LE(allocator_begin, allocator_end);
34868d75effSDimitry Andric     CHECK_LE(allocator_end, end);
34968d75effSDimitry Andric     if (begin < allocator_begin)
35068d75effSDimitry Andric       ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
35168d75effSDimitry Andric                            kReachable);
35268d75effSDimitry Andric     if (allocator_end < end)
35368d75effSDimitry Andric       ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
35468d75effSDimitry Andric   } else {
35568d75effSDimitry Andric     ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
35668d75effSDimitry Andric   }
35768d75effSDimitry Andric }
35868d75effSDimitry Andric 
359bdd1243dSDimitry Andric void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
360bdd1243dSDimitry Andric                           Frontier *frontier) {
361bdd1243dSDimitry Andric   for (uptr i = 0; i < ranges.size(); i++) {
362bdd1243dSDimitry Andric     ScanRangeForPointers(ranges[i].begin, ranges[i].end, frontier, "FAKE STACK",
363bdd1243dSDimitry Andric                          kReachable);
364bdd1243dSDimitry Andric   }
36568d75effSDimitry Andric }
36668d75effSDimitry Andric 
3675ffd83dbSDimitry Andric #  if SANITIZER_FUCHSIA
3685ffd83dbSDimitry Andric 
3695ffd83dbSDimitry Andric // Fuchsia handles all threads together with its own callback.
370bdd1243dSDimitry Andric static void ProcessThreads(SuspendedThreadsList const &, Frontier *, tid_t,
371bdd1243dSDimitry Andric                            uptr) {}
3725ffd83dbSDimitry Andric 
3735ffd83dbSDimitry Andric #  else
3745ffd83dbSDimitry Andric 
375e8d8bef9SDimitry Andric #    if SANITIZER_ANDROID
376e8d8bef9SDimitry Andric // FIXME: Move this out into *libcdep.cpp
377e8d8bef9SDimitry Andric extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
378e8d8bef9SDimitry Andric     pid_t, void (*cb)(void *, void *, uptr, void *), void *);
379e8d8bef9SDimitry Andric #    endif
380e8d8bef9SDimitry Andric 
381e8d8bef9SDimitry Andric static void ProcessThreadRegistry(Frontier *frontier) {
382e8d8bef9SDimitry Andric   InternalMmapVector<uptr> ptrs;
383bdd1243dSDimitry Andric   GetAdditionalThreadContextPtrsLocked(&ptrs);
384e8d8bef9SDimitry Andric 
385e8d8bef9SDimitry Andric   for (uptr i = 0; i < ptrs.size(); ++i) {
386e8d8bef9SDimitry Andric     void *ptr = reinterpret_cast<void *>(ptrs[i]);
387e8d8bef9SDimitry Andric     uptr chunk = PointsIntoChunk(ptr);
388e8d8bef9SDimitry Andric     if (!chunk)
389e8d8bef9SDimitry Andric       continue;
390e8d8bef9SDimitry Andric     LsanMetadata m(chunk);
391e8d8bef9SDimitry Andric     if (!m.allocated())
392e8d8bef9SDimitry Andric       continue;
393e8d8bef9SDimitry Andric 
394e8d8bef9SDimitry Andric     // Mark as reachable and add to frontier.
395e8d8bef9SDimitry Andric     LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
396e8d8bef9SDimitry Andric     m.set_tag(kReachable);
397e8d8bef9SDimitry Andric     frontier->push_back(chunk);
398e8d8bef9SDimitry Andric   }
399e8d8bef9SDimitry Andric }
400e8d8bef9SDimitry Andric 
40168d75effSDimitry Andric // Scans thread data (stacks and TLS) for heap pointers.
40268d75effSDimitry Andric static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
403bdd1243dSDimitry Andric                            Frontier *frontier, tid_t caller_tid,
404bdd1243dSDimitry Andric                            uptr caller_sp) {
405e8d8bef9SDimitry Andric   InternalMmapVector<uptr> registers;
406bdd1243dSDimitry Andric   InternalMmapVector<Range> extra_ranges;
40768d75effSDimitry Andric   for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
40868d75effSDimitry Andric     tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
409349cc55cSDimitry Andric     LOG_THREADS("Processing thread %llu.\n", os_id);
41068d75effSDimitry Andric     uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
41168d75effSDimitry Andric     DTLS *dtls;
4120eae32dcSDimitry Andric     bool thread_found =
4130eae32dcSDimitry Andric         GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
4140eae32dcSDimitry Andric                               &tls_end, &cache_begin, &cache_end, &dtls);
41568d75effSDimitry Andric     if (!thread_found) {
41668d75effSDimitry Andric       // If a thread can't be found in the thread registry, it's probably in the
41768d75effSDimitry Andric       // process of destruction. Log this event and move on.
418349cc55cSDimitry Andric       LOG_THREADS("Thread %llu not found in registry.\n", os_id);
41968d75effSDimitry Andric       continue;
42068d75effSDimitry Andric     }
42168d75effSDimitry Andric     uptr sp;
42268d75effSDimitry Andric     PtraceRegistersStatus have_registers =
423e8d8bef9SDimitry Andric         suspended_threads.GetRegistersAndSP(i, &registers, &sp);
42468d75effSDimitry Andric     if (have_registers != REGISTERS_AVAILABLE) {
425349cc55cSDimitry Andric       Report("Unable to get registers from thread %llu.\n", os_id);
42668d75effSDimitry Andric       // If unable to get SP, consider the entire stack to be reachable unless
42768d75effSDimitry Andric       // GetRegistersAndSP failed with ESRCH.
4280eae32dcSDimitry Andric       if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
4290eae32dcSDimitry Andric         continue;
43068d75effSDimitry Andric       sp = stack_begin;
43168d75effSDimitry Andric     }
432bdd1243dSDimitry Andric     if (suspended_threads.GetThreadID(i) == caller_tid) {
433bdd1243dSDimitry Andric       sp = caller_sp;
434bdd1243dSDimitry Andric     }
43568d75effSDimitry Andric 
436e8d8bef9SDimitry Andric     if (flags()->use_registers && have_registers) {
437e8d8bef9SDimitry Andric       uptr registers_begin = reinterpret_cast<uptr>(registers.data());
438e8d8bef9SDimitry Andric       uptr registers_end =
439e8d8bef9SDimitry Andric           reinterpret_cast<uptr>(registers.data() + registers.size());
44068d75effSDimitry Andric       ScanRangeForPointers(registers_begin, registers_end, frontier,
44168d75effSDimitry Andric                            "REGISTERS", kReachable);
442e8d8bef9SDimitry Andric     }
44368d75effSDimitry Andric 
44468d75effSDimitry Andric     if (flags()->use_stacks) {
445349cc55cSDimitry Andric       LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
446349cc55cSDimitry Andric                   (void *)stack_end, (void *)sp);
44768d75effSDimitry Andric       if (sp < stack_begin || sp >= stack_end) {
44868d75effSDimitry Andric         // SP is outside the recorded stack range (e.g. the thread is running a
44968d75effSDimitry Andric         // signal handler on alternate stack, or swapcontext was used).
45068d75effSDimitry Andric         // Again, consider the entire stack range to be reachable.
45168d75effSDimitry Andric         LOG_THREADS("WARNING: stack pointer not in stack range.\n");
45268d75effSDimitry Andric         uptr page_size = GetPageSizeCached();
45368d75effSDimitry Andric         int skipped = 0;
45468d75effSDimitry Andric         while (stack_begin < stack_end &&
45568d75effSDimitry Andric                !IsAccessibleMemoryRange(stack_begin, 1)) {
45668d75effSDimitry Andric           skipped++;
45768d75effSDimitry Andric           stack_begin += page_size;
45868d75effSDimitry Andric         }
45968d75effSDimitry Andric         LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
460349cc55cSDimitry Andric                     skipped, (void *)stack_begin, (void *)stack_end);
46168d75effSDimitry Andric       } else {
46268d75effSDimitry Andric         // Shrink the stack range to ignore out-of-scope values.
46368d75effSDimitry Andric         stack_begin = sp;
46468d75effSDimitry Andric       }
46568d75effSDimitry Andric       ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
46668d75effSDimitry Andric                            kReachable);
467bdd1243dSDimitry Andric       extra_ranges.clear();
468bdd1243dSDimitry Andric       GetThreadExtraStackRangesLocked(os_id, &extra_ranges);
469bdd1243dSDimitry Andric       ScanExtraStackRanges(extra_ranges, frontier);
47068d75effSDimitry Andric     }
47168d75effSDimitry Andric 
47268d75effSDimitry Andric     if (flags()->use_tls) {
47368d75effSDimitry Andric       if (tls_begin) {
474349cc55cSDimitry Andric         LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
47568d75effSDimitry Andric         // If the tls and cache ranges don't overlap, scan the full tls range;
47668d75effSDimitry Andric         // otherwise, only scan the non-overlapping portions.
47768d75effSDimitry Andric         if (cache_begin == cache_end || tls_end < cache_begin ||
47868d75effSDimitry Andric             tls_begin > cache_end) {
47968d75effSDimitry Andric           ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
48068d75effSDimitry Andric         } else {
48168d75effSDimitry Andric           if (tls_begin < cache_begin)
48268d75effSDimitry Andric             ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
48368d75effSDimitry Andric                                  kReachable);
48468d75effSDimitry Andric           if (tls_end > cache_end)
48568d75effSDimitry Andric             ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
48668d75effSDimitry Andric                                  kReachable);
48768d75effSDimitry Andric         }
48868d75effSDimitry Andric       }
489e8d8bef9SDimitry Andric #    if SANITIZER_ANDROID
490e8d8bef9SDimitry Andric       auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_id*/,
491e8d8bef9SDimitry Andric                      void *arg) -> void {
492e8d8bef9SDimitry Andric         ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
493e8d8bef9SDimitry Andric                              reinterpret_cast<uptr>(dtls_end),
494e8d8bef9SDimitry Andric                              reinterpret_cast<Frontier *>(arg), "DTLS",
495e8d8bef9SDimitry Andric                              kReachable);
496e8d8bef9SDimitry Andric       };
497e8d8bef9SDimitry Andric 
498e8d8bef9SDimitry Andric       // FIXME: There might be a race-condition here (and in Bionic) if the
499e8d8bef9SDimitry Andric       // thread is suspended in the middle of updating its DTLS. IOWs, we
500e8d8bef9SDimitry Andric       // could scan already freed memory. (probably fine for now)
501e8d8bef9SDimitry Andric       __libc_iterate_dynamic_tls(os_id, cb, frontier);
502e8d8bef9SDimitry Andric #    else
50368d75effSDimitry Andric       if (dtls && !DTLSInDestruction(dtls)) {
504e8d8bef9SDimitry Andric         ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
505e8d8bef9SDimitry Andric           uptr dtls_beg = dtv.beg;
506e8d8bef9SDimitry Andric           uptr dtls_end = dtls_beg + dtv.size;
50768d75effSDimitry Andric           if (dtls_beg < dtls_end) {
508349cc55cSDimitry Andric             LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
509349cc55cSDimitry Andric                         (void *)dtls_end);
51068d75effSDimitry Andric             ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
51168d75effSDimitry Andric                                  kReachable);
51268d75effSDimitry Andric           }
513e8d8bef9SDimitry Andric         });
51468d75effSDimitry Andric       } else {
51568d75effSDimitry Andric         // We are handling a thread with DTLS under destruction. Log about
51668d75effSDimitry Andric         // this and continue.
517349cc55cSDimitry Andric         LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
51868d75effSDimitry Andric       }
519e8d8bef9SDimitry Andric #    endif
52068d75effSDimitry Andric     }
52168d75effSDimitry Andric   }
522e8d8bef9SDimitry Andric 
523e8d8bef9SDimitry Andric   // Add pointers reachable from ThreadContexts
524e8d8bef9SDimitry Andric   ProcessThreadRegistry(frontier);
52568d75effSDimitry Andric }
52668d75effSDimitry Andric 
5275ffd83dbSDimitry Andric #  endif  // SANITIZER_FUCHSIA
5285ffd83dbSDimitry Andric 
52906c3fb27SDimitry Andric // A map that contains [region_begin, region_end) pairs.
53006c3fb27SDimitry Andric using RootRegions = DenseMap<detail::DenseMapPair<uptr, uptr>, uptr>;
53106c3fb27SDimitry Andric 
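// Returns the global set of user-registered root regions, keyed by
// [begin, end) pairs. The map object is constructed lazily via placement new
// into a static buffer, so this file needs no global constructor. For
// reference, the set is populated through the public interface declared in
// <sanitizer/lsan_interface.h>, e.g.:
//   __lsan_register_root_region(buf, size);
//   __lsan_unregister_root_region(buf, size);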
53206c3fb27SDimitry Andric static RootRegions &GetRootRegionsLocked() {
53306c3fb27SDimitry Andric   global_mutex.CheckLocked();
53406c3fb27SDimitry Andric   static RootRegions *regions = nullptr;
53506c3fb27SDimitry Andric   alignas(RootRegions) static char placeholder[sizeof(RootRegions)];
53606c3fb27SDimitry Andric   if (!regions)
53706c3fb27SDimitry Andric     regions = new (placeholder) RootRegions();
53806c3fb27SDimitry Andric   return *regions;
53968d75effSDimitry Andric }
54068d75effSDimitry Andric 
54106c3fb27SDimitry Andric bool HasRootRegions() { return !GetRootRegionsLocked().empty(); }
54206c3fb27SDimitry Andric 
54306c3fb27SDimitry Andric void ScanRootRegions(Frontier *frontier,
54406c3fb27SDimitry Andric                      const InternalMmapVectorNoCtor<Region> &mapped_regions) {
54506c3fb27SDimitry Andric   if (!flags()->use_root_regions)
54606c3fb27SDimitry Andric     return;
54706c3fb27SDimitry Andric 
54806c3fb27SDimitry Andric   InternalMmapVector<Region> regions;
54906c3fb27SDimitry Andric   GetRootRegionsLocked().forEach([&](const auto &kv) {
55006c3fb27SDimitry Andric     regions.push_back({kv.first.first, kv.first.second});
55106c3fb27SDimitry Andric     return true;
55206c3fb27SDimitry Andric   });
55306c3fb27SDimitry Andric 
55406c3fb27SDimitry Andric   InternalMmapVector<Region> intersection;
55506c3fb27SDimitry Andric   Intersect(mapped_regions, regions, intersection);
55606c3fb27SDimitry Andric 
55706c3fb27SDimitry Andric   for (const Region &r : intersection) {
55806c3fb27SDimitry Andric     LOG_POINTERS("Root region intersects with mapped region at %p-%p\n",
55906c3fb27SDimitry Andric                  (void *)r.begin, (void *)r.end);
56006c3fb27SDimitry Andric     ScanRangeForPointers(r.begin, r.end, frontier, "ROOT", kReachable);
56168d75effSDimitry Andric   }
56268d75effSDimitry Andric }
56368d75effSDimitry Andric 
56468d75effSDimitry Andric // Scans root regions for heap pointers.
56568d75effSDimitry Andric static void ProcessRootRegions(Frontier *frontier) {
56606c3fb27SDimitry Andric   if (!flags()->use_root_regions || !HasRootRegions())
5670eae32dcSDimitry Andric     return;
56806c3fb27SDimitry Andric   MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
56906c3fb27SDimitry Andric   MemoryMappedSegment segment;
57006c3fb27SDimitry Andric   InternalMmapVector<Region> mapped_regions;
57106c3fb27SDimitry Andric   while (proc_maps.Next(&segment))
57206c3fb27SDimitry Andric     if (segment.IsReadable())
57306c3fb27SDimitry Andric       mapped_regions.push_back({segment.start, segment.end});
57406c3fb27SDimitry Andric   ScanRootRegions(frontier, mapped_regions);
57568d75effSDimitry Andric }
57668d75effSDimitry Andric 
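// Drains |frontier| as a worklist: each chunk popped from it is scanned for
// pointers into further chunks, which ScanRangeForPointers tags and pushes in
// turn.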
57768d75effSDimitry Andric static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
57868d75effSDimitry Andric   while (frontier->size()) {
57968d75effSDimitry Andric     uptr next_chunk = frontier->back();
58068d75effSDimitry Andric     frontier->pop_back();
58168d75effSDimitry Andric     LsanMetadata m(next_chunk);
58268d75effSDimitry Andric     ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
58368d75effSDimitry Andric                          "HEAP", tag);
58468d75effSDimitry Andric   }
58568d75effSDimitry Andric }
58668d75effSDimitry Andric 
58768d75effSDimitry Andric // ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
58868d75effSDimitry Andric // which are reachable from it as indirectly leaked.
58968d75effSDimitry Andric static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
59068d75effSDimitry Andric   chunk = GetUserBegin(chunk);
59168d75effSDimitry Andric   LsanMetadata m(chunk);
59268d75effSDimitry Andric   if (m.allocated() && m.tag() != kReachable) {
59368d75effSDimitry Andric     ScanRangeForPointers(chunk, chunk + m.requested_size(),
59468d75effSDimitry Andric                          /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
59568d75effSDimitry Andric   }
59668d75effSDimitry Andric }
59768d75effSDimitry Andric 
598e8d8bef9SDimitry Andric static void IgnoredSuppressedCb(uptr chunk, void *arg) {
599e8d8bef9SDimitry Andric   CHECK(arg);
600e8d8bef9SDimitry Andric   chunk = GetUserBegin(chunk);
601e8d8bef9SDimitry Andric   LsanMetadata m(chunk);
602e8d8bef9SDimitry Andric   if (!m.allocated() || m.tag() == kIgnored)
603e8d8bef9SDimitry Andric     return;
604e8d8bef9SDimitry Andric 
605e8d8bef9SDimitry Andric   const InternalMmapVector<u32> &suppressed =
606e8d8bef9SDimitry Andric       *static_cast<const InternalMmapVector<u32> *>(arg);
607e8d8bef9SDimitry Andric   uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
608e8d8bef9SDimitry Andric   if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
609e8d8bef9SDimitry Andric     return;
610e8d8bef9SDimitry Andric 
611349cc55cSDimitry Andric   LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
612349cc55cSDimitry Andric                (void *)(chunk + m.requested_size()), m.requested_size());
613e8d8bef9SDimitry Andric   m.set_tag(kIgnored);
614e8d8bef9SDimitry Andric }
615e8d8bef9SDimitry Andric 
61668d75effSDimitry Andric // ForEachChunk callback. If chunk is marked as ignored, adds its address to
61768d75effSDimitry Andric // frontier.
61868d75effSDimitry Andric static void CollectIgnoredCb(uptr chunk, void *arg) {
61968d75effSDimitry Andric   CHECK(arg);
62068d75effSDimitry Andric   chunk = GetUserBegin(chunk);
62168d75effSDimitry Andric   LsanMetadata m(chunk);
62268d75effSDimitry Andric   if (m.allocated() && m.tag() == kIgnored) {
623349cc55cSDimitry Andric     LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
624349cc55cSDimitry Andric                  (void *)(chunk + m.requested_size()), m.requested_size());
62568d75effSDimitry Andric     reinterpret_cast<Frontier *>(arg)->push_back(chunk);
62668d75effSDimitry Andric   }
62768d75effSDimitry Andric }
62868d75effSDimitry Andric 
62968d75effSDimitry Andric // Sets the appropriate tag on each chunk.
6305ffd83dbSDimitry Andric static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
631bdd1243dSDimitry Andric                               Frontier *frontier, tid_t caller_tid,
632bdd1243dSDimitry Andric                               uptr caller_sp) {
633e8d8bef9SDimitry Andric   const InternalMmapVector<u32> &suppressed_stacks =
634e8d8bef9SDimitry Andric       GetSuppressionContext()->GetSortedSuppressedStacks();
635e8d8bef9SDimitry Andric   if (!suppressed_stacks.empty()) {
636e8d8bef9SDimitry Andric     ForEachChunk(IgnoredSuppressedCb,
637e8d8bef9SDimitry Andric                  const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
638e8d8bef9SDimitry Andric   }
6395ffd83dbSDimitry Andric   ForEachChunk(CollectIgnoredCb, frontier);
6405ffd83dbSDimitry Andric   ProcessGlobalRegions(frontier);
641bdd1243dSDimitry Andric   ProcessThreads(suspended_threads, frontier, caller_tid, caller_sp);
6425ffd83dbSDimitry Andric   ProcessRootRegions(frontier);
6435ffd83dbSDimitry Andric   FloodFillTag(frontier, kReachable);
64468d75effSDimitry Andric 
64568d75effSDimitry Andric   // The check here is relatively expensive, so we do this in a separate flood
64668d75effSDimitry Andric   // fill. That way we can skip the check for chunks that are reachable
64768d75effSDimitry Andric   // otherwise.
64868d75effSDimitry Andric   LOG_POINTERS("Processing platform-specific allocations.\n");
6495ffd83dbSDimitry Andric   ProcessPlatformSpecificAllocations(frontier);
6505ffd83dbSDimitry Andric   FloodFillTag(frontier, kReachable);
65168d75effSDimitry Andric 
65268d75effSDimitry Andric   // Iterate over leaked chunks and mark those that are reachable from other
65368d75effSDimitry Andric   // leaked chunks.
65468d75effSDimitry Andric   LOG_POINTERS("Scanning leaked chunks.\n");
65568d75effSDimitry Andric   ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
65668d75effSDimitry Andric }
65768d75effSDimitry Andric 
65868d75effSDimitry Andric // ForEachChunk callback. Resets the tags to pre-leak-check state.
65968d75effSDimitry Andric static void ResetTagsCb(uptr chunk, void *arg) {
66068d75effSDimitry Andric   (void)arg;
66168d75effSDimitry Andric   chunk = GetUserBegin(chunk);
66268d75effSDimitry Andric   LsanMetadata m(chunk);
66368d75effSDimitry Andric   if (m.allocated() && m.tag() != kIgnored)
66468d75effSDimitry Andric     m.set_tag(kDirectlyLeaked);
66568d75effSDimitry Andric }
66668d75effSDimitry Andric 
66768d75effSDimitry Andric // ForEachChunk callback. Collects unreachable chunks into the LeakedChunks
66868d75effSDimitry Andric // vector, from which a LeakReport is built later.
66968d75effSDimitry Andric static void CollectLeaksCb(uptr chunk, void *arg) {
67068d75effSDimitry Andric   CHECK(arg);
6710eae32dcSDimitry Andric   LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
67268d75effSDimitry Andric   chunk = GetUserBegin(chunk);
67368d75effSDimitry Andric   LsanMetadata m(chunk);
6740eae32dcSDimitry Andric   if (!m.allocated())
6750eae32dcSDimitry Andric     return;
6760eae32dcSDimitry Andric   if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
6770eae32dcSDimitry Andric     leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
67868d75effSDimitry Andric }
67968d75effSDimitry Andric 
680e8d8bef9SDimitry Andric void LeakSuppressionContext::PrintMatchedSuppressions() {
68168d75effSDimitry Andric   InternalMmapVector<Suppression *> matched;
682e8d8bef9SDimitry Andric   context.GetMatched(&matched);
68368d75effSDimitry Andric   if (!matched.size())
68468d75effSDimitry Andric     return;
68568d75effSDimitry Andric   const char *line = "-----------------------------------------------------";
68668d75effSDimitry Andric   Printf("%s\n", line);
68768d75effSDimitry Andric   Printf("Suppressions used:\n");
68868d75effSDimitry Andric   Printf("  count      bytes template\n");
689e8d8bef9SDimitry Andric   for (uptr i = 0; i < matched.size(); i++) {
690e8d8bef9SDimitry Andric     Printf("%7zu %10zu %s\n",
691e8d8bef9SDimitry Andric            static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
692e8d8bef9SDimitry Andric            matched[i]->weight, matched[i]->templ);
693e8d8bef9SDimitry Andric   }
69468d75effSDimitry Andric   Printf("%s\n\n", line);
69568d75effSDimitry Andric }
69668d75effSDimitry Andric 
6975ffd83dbSDimitry Andric #  if SANITIZER_FUCHSIA
6985ffd83dbSDimitry Andric 
6995ffd83dbSDimitry Andric // Fuchsia provides a libc interface that guarantees all threads are
7005ffd83dbSDimitry Andric // covered, and SuspendedThreadList is never really used.
7015ffd83dbSDimitry Andric static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}
7025ffd83dbSDimitry Andric 
7035ffd83dbSDimitry Andric #  else  // !SANITIZER_FUCHSIA
7045ffd83dbSDimitry Andric 
70568d75effSDimitry Andric static void ReportUnsuspendedThreads(
70668d75effSDimitry Andric     const SuspendedThreadsList &suspended_threads) {
70768d75effSDimitry Andric   InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
70868d75effSDimitry Andric   for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
70968d75effSDimitry Andric     threads[i] = suspended_threads.GetThreadID(i);
71068d75effSDimitry Andric 
71168d75effSDimitry Andric   Sort(threads.data(), threads.size());
71268d75effSDimitry Andric 
713bdd1243dSDimitry Andric   InternalMmapVector<tid_t> unsuspended;
714bdd1243dSDimitry Andric   GetRunningThreadsLocked(&unsuspended);
715bdd1243dSDimitry Andric 
716bdd1243dSDimitry Andric   for (auto os_id : unsuspended) {
717bdd1243dSDimitry Andric     uptr i = InternalLowerBound(threads, os_id);
718bdd1243dSDimitry Andric     if (i >= threads.size() || threads[i] != os_id)
719bdd1243dSDimitry Andric       Report(
720bdd1243dSDimitry Andric           "Running thread %zu was not suspended. False leaks are possible.\n",
721bdd1243dSDimitry Andric           os_id);
722bdd1243dSDimitry Andric   }
72368d75effSDimitry Andric }
72468d75effSDimitry Andric 
7255ffd83dbSDimitry Andric #  endif  // !SANITIZER_FUCHSIA
7265ffd83dbSDimitry Andric 
72768d75effSDimitry Andric static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
72868d75effSDimitry Andric                                   void *arg) {
72968d75effSDimitry Andric   CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
73068d75effSDimitry Andric   CHECK(param);
73168d75effSDimitry Andric   CHECK(!param->success);
73268d75effSDimitry Andric   ReportUnsuspendedThreads(suspended_threads);
733bdd1243dSDimitry Andric   ClassifyAllChunks(suspended_threads, &param->frontier, param->caller_tid,
734bdd1243dSDimitry Andric                     param->caller_sp);
7350eae32dcSDimitry Andric   ForEachChunk(CollectLeaksCb, &param->leaks);
73668d75effSDimitry Andric   // Clean up for subsequent leak checks. This assumes we did not overwrite any
73768d75effSDimitry Andric   // kIgnored tags.
73868d75effSDimitry Andric   ForEachChunk(ResetTagsCb, nullptr);
73968d75effSDimitry Andric   param->success = true;
74068d75effSDimitry Andric }
74168d75effSDimitry Andric 
742e8d8bef9SDimitry Andric static bool PrintResults(LeakReport &report) {
743e8d8bef9SDimitry Andric   uptr unsuppressed_count = report.UnsuppressedLeakCount();
744e8d8bef9SDimitry Andric   if (unsuppressed_count) {
745e8d8bef9SDimitry Andric     Decorator d;
746e8d8bef9SDimitry Andric     Printf(
747e8d8bef9SDimitry Andric         "\n"
748e8d8bef9SDimitry Andric         "================================================================="
749e8d8bef9SDimitry Andric         "\n");
750e8d8bef9SDimitry Andric     Printf("%s", d.Error());
751e8d8bef9SDimitry Andric     Report("ERROR: LeakSanitizer: detected memory leaks\n");
752e8d8bef9SDimitry Andric     Printf("%s", d.Default());
753e8d8bef9SDimitry Andric     report.ReportTopLeaks(flags()->max_leaks);
754e8d8bef9SDimitry Andric   }
755e8d8bef9SDimitry Andric   if (common_flags()->print_suppressions)
756e8d8bef9SDimitry Andric     GetSuppressionContext()->PrintMatchedSuppressions();
757e8d8bef9SDimitry Andric   if (unsuppressed_count > 0) {
758e8d8bef9SDimitry Andric     report.PrintSummary();
759e8d8bef9SDimitry Andric     return true;
760e8d8bef9SDimitry Andric   }
761e8d8bef9SDimitry Andric   return false;
762e8d8bef9SDimitry Andric }
763e8d8bef9SDimitry Andric 
76468d75effSDimitry Andric static bool CheckForLeaks() {
765bdd1243dSDimitry Andric   if (&__lsan_is_turned_off && __lsan_is_turned_off()) {
766bdd1243dSDimitry Andric     VReport(1, "LeakSanitizer is disabled");
76768d75effSDimitry Andric     return false;
768bdd1243dSDimitry Andric   }
769bdd1243dSDimitry Andric   VReport(1, "LeakSanitizer: checking for leaks");
770e8d8bef9SDimitry Andric   // Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
771e8d8bef9SDimitry Andric   // suppressions. However if a stack id was previously suppressed, it should be
772e8d8bef9SDimitry Andric   // suppressed in future checks as well.
773e8d8bef9SDimitry Andric   for (int i = 0;; ++i) {
77468d75effSDimitry Andric     EnsureMainThreadIDIsCorrect();
77568d75effSDimitry Andric     CheckForLeaksParam param;
776bdd1243dSDimitry Andric     // Capture the calling thread's stack pointer early to avoid false negatives:
777bdd1243dSDimitry Andric     // an old frame holding dead pointers may be overlapped by a new frame created
778bdd1243dSDimitry Andric     // inside CheckForLeaks, and those pointer bytes may remain intact when the
779bdd1243dSDimitry Andric     // threads are suspended and stack pointers are captured.
780bdd1243dSDimitry Andric     param.caller_tid = GetTid();
781bdd1243dSDimitry Andric     param.caller_sp = reinterpret_cast<uptr>(__builtin_frame_address(0));
78268d75effSDimitry Andric     LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
78368d75effSDimitry Andric     if (!param.success) {
78468d75effSDimitry Andric       Report("LeakSanitizer has encountered a fatal error.\n");
78568d75effSDimitry Andric       Report(
78668d75effSDimitry Andric           "HINT: For debugging, try setting environment variable "
78768d75effSDimitry Andric           "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
78868d75effSDimitry Andric       Report(
789e8d8bef9SDimitry Andric           "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
790e8d8bef9SDimitry Andric           "etc)\n");
79168d75effSDimitry Andric       Die();
79268d75effSDimitry Andric     }
7930eae32dcSDimitry Andric     LeakReport leak_report;
7940eae32dcSDimitry Andric     leak_report.AddLeakedChunks(param.leaks);
7950eae32dcSDimitry Andric 
796e8d8bef9SDimitry Andric     // No new suppressed stacks, so a rerun will not help and we can report.
7970eae32dcSDimitry Andric     if (!leak_report.ApplySuppressions())
7980eae32dcSDimitry Andric       return PrintResults(leak_report);
799e8d8bef9SDimitry Andric 
800e8d8bef9SDimitry Andric     // No indirect leaks to report, so we are done here.
8010eae32dcSDimitry Andric     if (!leak_report.IndirectUnsuppressedLeakCount())
8020eae32dcSDimitry Andric       return PrintResults(leak_report);
803e8d8bef9SDimitry Andric 
804e8d8bef9SDimitry Andric     if (i >= 8) {
805e8d8bef9SDimitry Andric       Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
8060eae32dcSDimitry Andric       return PrintResults(leak_report);
80768d75effSDimitry Andric     }
808e8d8bef9SDimitry Andric 
809e8d8bef9SDimitry Andric     // We found a new previously unseen suppressed call stack. Rerun to make
810e8d8bef9SDimitry Andric     // sure it does not hold indirect leaks.
811e8d8bef9SDimitry Andric     VReport(1, "Rerun with %zu suppressed stacks.",
812e8d8bef9SDimitry Andric             GetSuppressionContext()->GetSortedSuppressedStacks().size());
81368d75effSDimitry Andric   }
81468d75effSDimitry Andric }
81568d75effSDimitry Andric 
81668d75effSDimitry Andric static bool has_reported_leaks = false;
81768d75effSDimitry Andric bool HasReportedLeaks() { return has_reported_leaks; }
81868d75effSDimitry Andric 
81968d75effSDimitry Andric void DoLeakCheck() {
820349cc55cSDimitry Andric   Lock l(&global_mutex);
82168d75effSDimitry Andric   static bool already_done;
8220eae32dcSDimitry Andric   if (already_done)
8230eae32dcSDimitry Andric     return;
82468d75effSDimitry Andric   already_done = true;
82568d75effSDimitry Andric   has_reported_leaks = CheckForLeaks();
8260eae32dcSDimitry Andric   if (has_reported_leaks)
8270eae32dcSDimitry Andric     HandleLeaks();
82868d75effSDimitry Andric }
82968d75effSDimitry Andric 
83068d75effSDimitry Andric static int DoRecoverableLeakCheck() {
831349cc55cSDimitry Andric   Lock l(&global_mutex);
83268d75effSDimitry Andric   bool have_leaks = CheckForLeaks();
83368d75effSDimitry Andric   return have_leaks ? 1 : 0;
83468d75effSDimitry Andric }
83568d75effSDimitry Andric 
83668d75effSDimitry Andric void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
83768d75effSDimitry Andric 
83868d75effSDimitry Andric ///// LeakReport implementation. /////
83968d75effSDimitry Andric 
84068d75effSDimitry Andric // A hard limit on the number of distinct leaks, to avoid quadratic complexity
84168d75effSDimitry Andric // in LeakReport::AddLeakedChunks(). We don't expect to ever see this many leaks
84268d75effSDimitry Andric // in real-world applications.
8430eae32dcSDimitry Andric // FIXME: Get rid of this limit by moving logic into DedupLeaks.
84468d75effSDimitry Andric const uptr kMaxLeaksConsidered = 5000;
84568d75effSDimitry Andric 
8460eae32dcSDimitry Andric void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
8470eae32dcSDimitry Andric   for (const LeakedChunk &leak : chunks) {
8480eae32dcSDimitry Andric     uptr chunk = leak.chunk;
8490eae32dcSDimitry Andric     u32 stack_trace_id = leak.stack_trace_id;
8500eae32dcSDimitry Andric     uptr leaked_size = leak.leaked_size;
8510eae32dcSDimitry Andric     ChunkTag tag = leak.tag;
85268d75effSDimitry Andric     CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
853349cc55cSDimitry Andric 
854349cc55cSDimitry Andric     if (u32 resolution = flags()->resolution) {
855349cc55cSDimitry Andric       StackTrace stack = StackDepotGet(stack_trace_id);
856349cc55cSDimitry Andric       stack.size = Min(stack.size, resolution);
857349cc55cSDimitry Andric       stack_trace_id = StackDepotPut(stack);
858349cc55cSDimitry Andric     }
859349cc55cSDimitry Andric 
86068d75effSDimitry Andric     bool is_directly_leaked = (tag == kDirectlyLeaked);
86168d75effSDimitry Andric     uptr i;
86268d75effSDimitry Andric     for (i = 0; i < leaks_.size(); i++) {
86368d75effSDimitry Andric       if (leaks_[i].stack_trace_id == stack_trace_id &&
86468d75effSDimitry Andric           leaks_[i].is_directly_leaked == is_directly_leaked) {
86568d75effSDimitry Andric         leaks_[i].hit_count++;
86668d75effSDimitry Andric         leaks_[i].total_size += leaked_size;
86768d75effSDimitry Andric         break;
86868d75effSDimitry Andric       }
86968d75effSDimitry Andric     }
87068d75effSDimitry Andric     if (i == leaks_.size()) {
8710eae32dcSDimitry Andric       if (leaks_.size() == kMaxLeaksConsidered)
8720eae32dcSDimitry Andric         return;
8730eae32dcSDimitry Andric       Leak leak = {next_id_++,         /* hit_count */ 1,
8740eae32dcSDimitry Andric                    leaked_size,        stack_trace_id,
87568d75effSDimitry Andric                    is_directly_leaked, /* is_suppressed */ false};
87668d75effSDimitry Andric       leaks_.push_back(leak);
87768d75effSDimitry Andric     }
87868d75effSDimitry Andric     if (flags()->report_objects) {
87906c3fb27SDimitry Andric       LeakedObject obj = {leaks_[i].id, GetUserAddr(chunk), leaked_size};
88068d75effSDimitry Andric       leaked_objects_.push_back(obj);
88168d75effSDimitry Andric     }
88268d75effSDimitry Andric   }
8830eae32dcSDimitry Andric }
88468d75effSDimitry Andric 
88568d75effSDimitry Andric static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
88668d75effSDimitry Andric   if (leak1.is_directly_leaked == leak2.is_directly_leaked)
88768d75effSDimitry Andric     return leak1.total_size > leak2.total_size;
88868d75effSDimitry Andric   else
88968d75effSDimitry Andric     return leak1.is_directly_leaked;
89068d75effSDimitry Andric }
89168d75effSDimitry Andric 
void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf(
        "Too many leaks! Only the first %zu leaks encountered will be "
        "reported.\n",
        kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report)
      break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

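// Prints a single leak record: a colored "Direct"/"Indirect" header with the
// aggregated byte and object counts, the allocation stack trace, and, when
// report_objects is set, the individual leaked addresses.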
void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.Default());

  CHECK(leaks_[index].stack_trace_id);
  StackDepotGet(leaks_[index].stack_trace_id).Print();

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

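// Emits the one-line error summary, counting bytes and allocations only for
// unsuppressed leaks.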
void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary;
  summary.AppendF("%zu byte(s) leaked in %zu allocation(s).", bytes,
                  allocations);
  ReportErrorSummary(summary.data());
}

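// Matches every leak's allocation stack against the suppression context and
// marks matching leaks as suppressed. Returns the number of leaks newly
// suppressed by this call.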
uptr LeakReport::ApplySuppressions() {
  LeakSuppressionContext *suppressions = GetSuppressionContext();
  uptr new_suppressions = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
                               leaks_[i].total_size)) {
      leaks_[i].is_suppressed = true;
      ++new_suppressions;
    }
  }
  return new_suppressions;
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed)
      result++;
  return result;
}

uptr LeakReport::IndirectUnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
      result++;
  return result;
}

}  // namespace __lsan
#else   // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() {}
void DoLeakCheck() {}
void DoRecoverableLeakCheckVoid() {}
void DisableInThisThread() {}
void EnableInThisThread() {}
}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;

extern "C" {
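// Marks a heap object so that it is never reported as a leak. Illustrative
// client usage (the allocation below is hypothetical):
//   void *cache = malloc(64);     // intentionally kept alive until exit
//   __lsan_ignore_object(cache);  // suppress any leak report for this object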
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is
  // not locked.
  Lock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObject(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1,
            "__lsan_ignore_object(): "
            "heap object at %p is already being ignored\n",
            p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}

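// Root regions are extra address ranges that the leak scanner treats as live
// roots. Registration is reference-counted per exact {begin, begin + size}
// pair, so a region registered twice must be unregistered twice. Illustrative
// usage (the buffer below is hypothetical):
//   static char arena[1 << 16];
//   __lsan_register_root_region(arena, sizeof(arena));
//   ...
//   __lsan_unregister_root_region(arena, sizeof(arena));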
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  VReport(1, "Registered root region at %p of size %zu\n", begin, size);
  uptr b = reinterpret_cast<uptr>(begin);
  uptr e = b + size;
  CHECK_LT(b, e);

  Lock l(&global_mutex);
  ++GetRootRegionsLocked()[{b, e}];
#endif  // CAN_SANITIZE_LEAKS
}

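// Drops one reference to a previously registered root region. The begin/size
// pair must match a registration exactly; unregistering an unknown region is
// a fatal error.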
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  uptr b = reinterpret_cast<uptr>(begin);
  uptr e = b + size;
  CHECK_LT(b, e);
  VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);

  {
    Lock l(&global_mutex);
    if (auto *f = GetRootRegionsLocked().find({b, e})) {
      if (--(f->second) == 0)
        GetRootRegionsLocked().erase(f);
      return;
    }
  }
  Report(
      "__lsan_unregister_root_region(): region at %p of size %zu has not "
      "been registered.\n",
      begin, size);
  Die();
#endif  // CAN_SANITIZE_LEAKS
}

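// __lsan_disable/__lsan_enable adjust a per-thread disable counter; heap
// objects allocated while the counter is positive are not treated as leaks.
// Calls may nest, but every __lsan_disable must be matched by a later
// __lsan_enable. The public header <sanitizer/lsan_interface.h> also offers
// an RAII wrapper (__lsan::ScopedDisabler) for this pattern.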
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}

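// Runs the leak check immediately (subject to detect_leaks). Note that
// DoLeakCheck() runs at most once per process, so an explicit call here makes
// the later atexit check a no-op.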
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}

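// Like __lsan_do_leak_check(), but never terminates the process: returns
// nonzero if leaks were detected and zero otherwise.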
SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
  return 0;
}

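// The functions below are weak interface hooks with empty defaults; a client
// binary may provide strong definitions to change LSan's behavior at startup.
// Illustrative override (the flag string is only an example):
//   extern "C" const char *__lsan_default_options() {
//     return "report_objects=1:max_leaks=10";
//   }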
SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
  return "";
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_WEAK_DEF(int, __lsan_is_turned_off, void) {
  return 0;
}

SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_suppressions, void) {
  return "";
}
#endif
}  // extern "C"