xref: /freebsd-src/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp (revision 0eae32dcef82f6f06de6419a0d623d7def0cc8f6)
168d75effSDimitry Andric //=-- lsan_common.cpp -----------------------------------------------------===//
268d75effSDimitry Andric //
368d75effSDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
468d75effSDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
568d75effSDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
668d75effSDimitry Andric //
768d75effSDimitry Andric //===----------------------------------------------------------------------===//
868d75effSDimitry Andric //
968d75effSDimitry Andric // This file is a part of LeakSanitizer.
1068d75effSDimitry Andric // Implementation of common leak checking functionality.
1168d75effSDimitry Andric //
1268d75effSDimitry Andric //===----------------------------------------------------------------------===//
1368d75effSDimitry Andric 
1468d75effSDimitry Andric #include "lsan_common.h"
1568d75effSDimitry Andric 
1668d75effSDimitry Andric #include "sanitizer_common/sanitizer_common.h"
1768d75effSDimitry Andric #include "sanitizer_common/sanitizer_flag_parser.h"
1868d75effSDimitry Andric #include "sanitizer_common/sanitizer_flags.h"
1968d75effSDimitry Andric #include "sanitizer_common/sanitizer_placement_new.h"
2068d75effSDimitry Andric #include "sanitizer_common/sanitizer_procmaps.h"
2168d75effSDimitry Andric #include "sanitizer_common/sanitizer_report_decorator.h"
2268d75effSDimitry Andric #include "sanitizer_common/sanitizer_stackdepot.h"
2368d75effSDimitry Andric #include "sanitizer_common/sanitizer_stacktrace.h"
2468d75effSDimitry Andric #include "sanitizer_common/sanitizer_suppressions.h"
2568d75effSDimitry Andric #include "sanitizer_common/sanitizer_thread_registry.h"
2668d75effSDimitry Andric #include "sanitizer_common/sanitizer_tls_get_addr.h"
2768d75effSDimitry Andric 
2868d75effSDimitry Andric #if CAN_SANITIZE_LEAKS
2968d75effSDimitry Andric namespace __lsan {
3068d75effSDimitry Andric 
// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
Mutex global_mutex;

// Runtime flag values for LSan; populated by SetDefaults() and flag parsing.
Flags lsan_flags;
3668d75effSDimitry Andric 
3768d75effSDimitry Andric void DisableCounterUnderflow() {
3868d75effSDimitry Andric   if (common_flags()->detect_leaks) {
3968d75effSDimitry Andric     Report("Unmatched call to __lsan_enable().\n");
4068d75effSDimitry Andric     Die();
4168d75effSDimitry Andric   }
4268d75effSDimitry Andric }
4368d75effSDimitry Andric 
// Resets every LSan flag to the default value declared in lsan_flags.inc.
void Flags::SetDefaults() {
#  define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#  include "lsan_flags.inc"
#  undef LSAN_FLAG
}
4968d75effSDimitry Andric 
// Registers every flag declared in lsan_flags.inc with |parser|, binding each
// one to the corresponding member of |f|.
void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#  define LSAN_FLAG(Type, Name, DefaultValue, Description) \
    RegisterFlag(parser, #Name, Description, &f->Name);
#  include "lsan_flags.inc"
#  undef LSAN_FLAG
}
5668d75effSDimitry Andric 
// Verbose-logging helpers: emit a Report() only when the corresponding
// debugging flag (log_pointers / log_threads) is enabled.
#  define LOG_POINTERS(...)      \
    do {                         \
      if (flags()->log_pointers) \
        Report(__VA_ARGS__);     \
    } while (0)

#  define LOG_THREADS(...)      \
    do {                        \
      if (flags()->log_threads) \
        Report(__VA_ARGS__);    \
    } while (0)
6868d75effSDimitry Andric 
69e8d8bef9SDimitry Andric class LeakSuppressionContext {
70e8d8bef9SDimitry Andric   bool parsed = false;
71e8d8bef9SDimitry Andric   SuppressionContext context;
72e8d8bef9SDimitry Andric   bool suppressed_stacks_sorted = true;
73e8d8bef9SDimitry Andric   InternalMmapVector<u32> suppressed_stacks;
74*0eae32dcSDimitry Andric   const LoadedModule *suppress_module = nullptr;
75e8d8bef9SDimitry Andric 
76e8d8bef9SDimitry Andric   void LazyInit();
77*0eae32dcSDimitry Andric   Suppression *GetSuppressionForAddr(uptr addr);
78*0eae32dcSDimitry Andric   bool SuppressInvalid(const StackTrace &stack);
79*0eae32dcSDimitry Andric   bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);
80e8d8bef9SDimitry Andric 
81e8d8bef9SDimitry Andric  public:
82e8d8bef9SDimitry Andric   LeakSuppressionContext(const char *supprression_types[],
83e8d8bef9SDimitry Andric                          int suppression_types_num)
84e8d8bef9SDimitry Andric       : context(supprression_types, suppression_types_num) {}
85e8d8bef9SDimitry Andric 
86*0eae32dcSDimitry Andric   bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size);
87e8d8bef9SDimitry Andric 
88e8d8bef9SDimitry Andric   const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
89e8d8bef9SDimitry Andric     if (!suppressed_stacks_sorted) {
90e8d8bef9SDimitry Andric       suppressed_stacks_sorted = true;
91e8d8bef9SDimitry Andric       SortAndDedup(suppressed_stacks);
92e8d8bef9SDimitry Andric     }
93e8d8bef9SDimitry Andric     return suppressed_stacks;
94e8d8bef9SDimitry Andric   }
95e8d8bef9SDimitry Andric   void PrintMatchedSuppressions();
96e8d8bef9SDimitry Andric };
97e8d8bef9SDimitry Andric 
// Storage for the global suppression context; constructed via placement new
// in InitializeSuppressions() to avoid dynamic initialization order issues.
ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
static LeakSuppressionContext *suppression_ctx = nullptr;
// The only suppression rule type LSan understands.
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = {kSuppressionLeak};
// Suppressions that are always active, independent of user configuration.
static const char kStdSuppressions[] =
#  if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // definition.
    "leak:*pthread_exit*\n"
#  endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
#  if SANITIZER_MAC
    // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
    "leak:*_os_trace*\n"
#  endif
    // TLS leak in some glibc versions, described in
    // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
    "leak:*tls_get_addr*\n";
11568d75effSDimitry Andric 
// Constructs the global suppression context in its preallocated storage.
// Must be called exactly once; the CHECK enforces this.
void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder)
      LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
}
12168d75effSDimitry Andric 
122e8d8bef9SDimitry Andric void LeakSuppressionContext::LazyInit() {
123e8d8bef9SDimitry Andric   if (!parsed) {
124e8d8bef9SDimitry Andric     parsed = true;
125e8d8bef9SDimitry Andric     context.ParseFromFile(flags()->suppressions);
126e8d8bef9SDimitry Andric     if (&__lsan_default_suppressions)
127e8d8bef9SDimitry Andric       context.Parse(__lsan_default_suppressions());
128e8d8bef9SDimitry Andric     context.Parse(kStdSuppressions);
129*0eae32dcSDimitry Andric     if (flags()->use_tls && flags()->use_ld_allocations)
130*0eae32dcSDimitry Andric       suppress_module = GetLinker();
131e8d8bef9SDimitry Andric   }
132e8d8bef9SDimitry Andric }
133e8d8bef9SDimitry Andric 
134*0eae32dcSDimitry Andric Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
135*0eae32dcSDimitry Andric   Suppression *s = nullptr;
136*0eae32dcSDimitry Andric 
137*0eae32dcSDimitry Andric   // Suppress by module name.
138*0eae32dcSDimitry Andric   if (const char *module_name =
139*0eae32dcSDimitry Andric           Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
140*0eae32dcSDimitry Andric     if (context.Match(module_name, kSuppressionLeak, &s))
141*0eae32dcSDimitry Andric       return s;
142*0eae32dcSDimitry Andric 
143*0eae32dcSDimitry Andric   // Suppress by file or function name.
144*0eae32dcSDimitry Andric   SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
145*0eae32dcSDimitry Andric   for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
146*0eae32dcSDimitry Andric     if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
147*0eae32dcSDimitry Andric         context.Match(cur->info.file, kSuppressionLeak, &s)) {
148*0eae32dcSDimitry Andric       break;
149*0eae32dcSDimitry Andric     }
150*0eae32dcSDimitry Andric   }
151*0eae32dcSDimitry Andric   frames->ClearAll();
152*0eae32dcSDimitry Andric   return s;
153*0eae32dcSDimitry Andric }
154*0eae32dcSDimitry Andric 
155*0eae32dcSDimitry Andric static uptr GetCallerPC(const StackTrace &stack) {
156*0eae32dcSDimitry Andric   // The top frame is our malloc/calloc/etc. The next frame is the caller.
157*0eae32dcSDimitry Andric   if (stack.size >= 2)
158*0eae32dcSDimitry Andric     return stack.trace[1];
159*0eae32dcSDimitry Andric   return 0;
160*0eae32dcSDimitry Andric }
161*0eae32dcSDimitry Andric 
162*0eae32dcSDimitry Andric // On Linux, treats all chunks allocated from ld-linux.so as reachable, which
163*0eae32dcSDimitry Andric // covers dynamically allocated TLS blocks, internal dynamic loader's loaded
164*0eae32dcSDimitry Andric // modules accounting etc.
165*0eae32dcSDimitry Andric // Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
166*0eae32dcSDimitry Andric // They are allocated with a __libc_memalign() call in allocate_and_init()
167*0eae32dcSDimitry Andric // (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
168*0eae32dcSDimitry Andric // blocks, but we can make sure they come from our own allocator by intercepting
169*0eae32dcSDimitry Andric // __libc_memalign(). On top of that, there is no easy way to reach them. Their
170*0eae32dcSDimitry Andric // addresses are stored in a dynamically allocated array (the DTV) which is
171*0eae32dcSDimitry Andric // referenced from the static TLS. Unfortunately, we can't just rely on the DTV
172*0eae32dcSDimitry Andric // being reachable from the static TLS, and the dynamic TLS being reachable from
173*0eae32dcSDimitry Andric // the DTV. This is because the initial DTV is allocated before our interception
174*0eae32dcSDimitry Andric // mechanism kicks in, and thus we don't recognize it as allocated memory. We
175*0eae32dcSDimitry Andric // can't special-case it either, since we don't know its size.
176*0eae32dcSDimitry Andric // Our solution is to include in the root set all allocations made from
177*0eae32dcSDimitry Andric // ld-linux.so (which is where allocate_and_init() is implemented). This is
178*0eae32dcSDimitry Andric // guaranteed to include all dynamic TLS blocks (and possibly other allocations
179*0eae32dcSDimitry Andric // which we don't care about).
180*0eae32dcSDimitry Andric // On all other platforms, this simply checks to ensure that the caller pc is
181*0eae32dcSDimitry Andric // valid before reporting chunks as leaked.
182*0eae32dcSDimitry Andric bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
183*0eae32dcSDimitry Andric   uptr caller_pc = GetCallerPC(stack);
184*0eae32dcSDimitry Andric   // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
185*0eae32dcSDimitry Andric   // it as reachable, as we can't properly report its allocation stack anyway.
186*0eae32dcSDimitry Andric   return !caller_pc ||
187*0eae32dcSDimitry Andric          (suppress_module && suppress_module->containsAddress(caller_pc));
188*0eae32dcSDimitry Andric }
189*0eae32dcSDimitry Andric 
190*0eae32dcSDimitry Andric bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
191*0eae32dcSDimitry Andric                                             uptr hit_count, uptr total_size) {
192*0eae32dcSDimitry Andric   for (uptr i = 0; i < stack.size; i++) {
193*0eae32dcSDimitry Andric     Suppression *s = GetSuppressionForAddr(
194*0eae32dcSDimitry Andric         StackTrace::GetPreviousInstructionPc(stack.trace[i]));
195*0eae32dcSDimitry Andric     if (s) {
196*0eae32dcSDimitry Andric       s->weight += total_size;
197*0eae32dcSDimitry Andric       atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
198*0eae32dcSDimitry Andric       return true;
199*0eae32dcSDimitry Andric     }
200*0eae32dcSDimitry Andric   }
201*0eae32dcSDimitry Andric   return false;
202*0eae32dcSDimitry Andric }
203*0eae32dcSDimitry Andric 
204*0eae32dcSDimitry Andric bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
205*0eae32dcSDimitry Andric                                       uptr total_size) {
206*0eae32dcSDimitry Andric   LazyInit();
207*0eae32dcSDimitry Andric   StackTrace stack = StackDepotGet(stack_trace_id);
208*0eae32dcSDimitry Andric   if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
209*0eae32dcSDimitry Andric     return false;
210*0eae32dcSDimitry Andric   suppressed_stacks_sorted = false;
211*0eae32dcSDimitry Andric   suppressed_stacks.push_back(stack_trace_id);
212*0eae32dcSDimitry Andric   return true;
213*0eae32dcSDimitry Andric }
214*0eae32dcSDimitry Andric 
// Returns the global suppression context; InitializeSuppressions() must have
// been called first.
static LeakSuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}
21968d75effSDimitry Andric 
// Global list of user-registered root regions; per the comment on
// global_mutex above, that mutex guards this list.
static InternalMmapVectorNoCtor<RootRegion> root_regions;

// Read-only accessor for the root region list.
InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions() {
  return &root_regions;
}
22568d75effSDimitry Andric 
22668d75effSDimitry Andric void InitCommonLsan() {
22768d75effSDimitry Andric   if (common_flags()->detect_leaks) {
22868d75effSDimitry Andric     // Initialization which can fail or print warnings should only be done if
22968d75effSDimitry Andric     // LSan is actually enabled.
23068d75effSDimitry Andric     InitializeSuppressions();
23168d75effSDimitry Andric     InitializePlatformSpecificModules();
23268d75effSDimitry Andric   }
23368d75effSDimitry Andric }
23468d75effSDimitry Andric 
// Terminal color decorator for leak reports.
class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() {}
  const char *Error() { return Red(); }  // error text color
  const char *Leak() { return Blue(); }  // leak summary color
};
24168d75effSDimitry Andric 
// Fast plausibility filter: returns false for bit patterns that cannot be
// user-space heap addresses on this architecture, so the scanner can skip
// them without consulting the allocator.
static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress)
    return false;
#  if defined(__x86_64__)
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#  elif defined(__mips64)
  return ((p >> 40) == 0);
#  elif defined(__aarch64__)
  // Infer the active virtual address width from a current stack address,
  // since AArch64 kernels can be configured with different VA sizes.
  unsigned runtimeVMA = (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
  return ((p >> runtimeVMA) == 0);
#  else
  return true;
#  endif
}
26068d75effSDimitry Andric 
// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
               (void *)end);
  // Round the cursor up to the configured pointer alignment before scanning.
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void **>(pp);
    // Cheap architecture-specific filter before the allocator lookup.
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p)))
      continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk)
      continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin)
      continue;
    LsanMetadata m(chunk);
    // Already-visited and explicitly ignored chunks need no (re)tagging.
    if (m.tag() == kReachable || m.tag() == kIgnored)
      continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
          m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
                 (void *)pp, p, (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}
30868d75effSDimitry Andric 
30968d75effSDimitry Andric // Scans a global range for pointers
31068d75effSDimitry Andric void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
31168d75effSDimitry Andric   uptr allocator_begin = 0, allocator_end = 0;
31268d75effSDimitry Andric   GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
31368d75effSDimitry Andric   if (begin <= allocator_begin && allocator_begin < end) {
31468d75effSDimitry Andric     CHECK_LE(allocator_begin, allocator_end);
31568d75effSDimitry Andric     CHECK_LE(allocator_end, end);
31668d75effSDimitry Andric     if (begin < allocator_begin)
31768d75effSDimitry Andric       ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
31868d75effSDimitry Andric                            kReachable);
31968d75effSDimitry Andric     if (allocator_end < end)
32068d75effSDimitry Andric       ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
32168d75effSDimitry Andric   } else {
32268d75effSDimitry Andric     ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
32368d75effSDimitry Andric   }
32468d75effSDimitry Andric }
32568d75effSDimitry Andric 
32668d75effSDimitry Andric void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg) {
32768d75effSDimitry Andric   Frontier *frontier = reinterpret_cast<Frontier *>(arg);
32868d75effSDimitry Andric   ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
32968d75effSDimitry Andric }
33068d75effSDimitry Andric 
#  if SANITIZER_FUCHSIA

// Fuchsia handles all threads together with its own callback.
static void ProcessThreads(SuspendedThreadsList const &, Frontier *) {}

#  else

#    if SANITIZER_ANDROID
// FIXME: Move this out into *libcdep.cpp
// Bionic's iterator over a thread's dynamic TLS segments; declared weak so
// linking still succeeds against libc versions that lack it.
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
    pid_t, void (*cb)(void *, void *, uptr, void *), void *);
#    endif
343e8d8bef9SDimitry Andric 
344e8d8bef9SDimitry Andric static void ProcessThreadRegistry(Frontier *frontier) {
345e8d8bef9SDimitry Andric   InternalMmapVector<uptr> ptrs;
346e8d8bef9SDimitry Andric   GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
347e8d8bef9SDimitry Andric       GetAdditionalThreadContextPtrs, &ptrs);
348e8d8bef9SDimitry Andric 
349e8d8bef9SDimitry Andric   for (uptr i = 0; i < ptrs.size(); ++i) {
350e8d8bef9SDimitry Andric     void *ptr = reinterpret_cast<void *>(ptrs[i]);
351e8d8bef9SDimitry Andric     uptr chunk = PointsIntoChunk(ptr);
352e8d8bef9SDimitry Andric     if (!chunk)
353e8d8bef9SDimitry Andric       continue;
354e8d8bef9SDimitry Andric     LsanMetadata m(chunk);
355e8d8bef9SDimitry Andric     if (!m.allocated())
356e8d8bef9SDimitry Andric       continue;
357e8d8bef9SDimitry Andric 
358e8d8bef9SDimitry Andric     // Mark as reachable and add to frontier.
359e8d8bef9SDimitry Andric     LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
360e8d8bef9SDimitry Andric     m.set_tag(kReachable);
361e8d8bef9SDimitry Andric     frontier->push_back(chunk);
362e8d8bef9SDimitry Andric   }
363e8d8bef9SDimitry Andric }
364e8d8bef9SDimitry Andric 
// Scans thread data (stacks and TLS) for heap pointers.
// For each suspended thread: scans its saved register file, its stack (from
// SP to the stack top when SP is in range, otherwise the whole accessible
// stack), and its static and dynamic TLS ranges. Finally adds pointers held
// by ThreadContext structures.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalMmapVector<uptr> registers;
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %llu.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found =
        GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
                              &tls_end, &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %llu not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    PtraceRegistersStatus have_registers =
        suspended_threads.GetRegistersAndSP(i, &registers, &sp);
    if (have_registers != REGISTERS_AVAILABLE) {
      Report("Unable to get registers from thread %llu.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable unless
      // GetRegistersAndSP failed with ESRCH.
      if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
        continue;
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers) {
      // Saved register contents may hold the only live reference to a chunk.
      uptr registers_begin = reinterpret_cast<uptr>(registers.data());
      uptr registers_end =
          reinterpret_cast<uptr>(registers.data() + registers.size());
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);
    }

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
                  (void *)stack_end, (void *)sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        // Skip leading guard pages, which are mapped but not accessible.
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, (void *)stack_begin, (void *)stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      if (tls_begin) {
        LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
        // If the tls and cache ranges don't overlap, scan full tls range,
        // otherwise, only scan the non-overlapping portions
        if (cache_begin == cache_end || tls_end < cache_begin ||
            tls_begin > cache_end) {
          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
        } else {
          if (tls_begin < cache_begin)
            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                                 kReachable);
          if (tls_end > cache_end)
            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                                 kReachable);
        }
      }
#    if SANITIZER_ANDROID
      // On Android, Bionic enumerates the dynamic TLS segments for us.
      auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_idd*/,
                     void *arg) -> void {
        ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
                             reinterpret_cast<uptr>(dtls_end),
                             reinterpret_cast<Frontier *>(arg), "DTLS",
                             kReachable);
      };

      // FIXME: There might be a race-condition here (and in Bionic) if the
      // thread is suspended in the middle of updating its DTLS. IOWs, we
      // could scan already freed memory. (probably fine for now)
      __libc_iterate_dynamic_tls(os_id, cb, frontier);
#    else
      if (dtls && !DTLSInDestruction(dtls)) {
        // Scan each dynamic TLS block recorded in the thread's DTV.
        ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
          uptr dtls_beg = dtv.beg;
          uptr dtls_end = dtls_beg + dtv.size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
                        (void *)dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        });
      } else {
        // We are handling a thread with DTLS under destruction. Log about
        // this and continue.
        LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
      }
#    endif
    }
  }

  // Add pointers reachable from ThreadContexts
  ProcessThreadRegistry(frontier);
}
48368d75effSDimitry Andric 
4845ffd83dbSDimitry Andric #  endif  // SANITIZER_FUCHSIA
4855ffd83dbSDimitry Andric 
48668d75effSDimitry Andric void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
48768d75effSDimitry Andric                     uptr region_begin, uptr region_end, bool is_readable) {
48868d75effSDimitry Andric   uptr intersection_begin = Max(root_region.begin, region_begin);
48968d75effSDimitry Andric   uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
490*0eae32dcSDimitry Andric   if (intersection_begin >= intersection_end)
491*0eae32dcSDimitry Andric     return;
49268d75effSDimitry Andric   LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
493349cc55cSDimitry Andric                (void *)root_region.begin,
494349cc55cSDimitry Andric                (void *)(root_region.begin + root_region.size),
495349cc55cSDimitry Andric                (void *)region_begin, (void *)region_end,
49668d75effSDimitry Andric                is_readable ? "readable" : "unreadable");
49768d75effSDimitry Andric   if (is_readable)
49868d75effSDimitry Andric     ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
49968d75effSDimitry Andric                          kReachable);
50068d75effSDimitry Andric }
50168d75effSDimitry Andric 
// Scans a single root region for heap pointers by walking the current
// process memory map and clipping the region against every segment.
// The map is re-read on each call (with caching enabled).
static void ProcessRootRegion(Frontier *frontier,
                              const RootRegion &root_region) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    ScanRootRegion(frontier, root_region, segment.start, segment.end,
                   segment.IsReadable());
  }
}
51168d75effSDimitry Andric 
51268d75effSDimitry Andric // Scans root regions for heap pointers.
51368d75effSDimitry Andric static void ProcessRootRegions(Frontier *frontier) {
514*0eae32dcSDimitry Andric   if (!flags()->use_root_regions)
515*0eae32dcSDimitry Andric     return;
516349cc55cSDimitry Andric   for (uptr i = 0; i < root_regions.size(); i++)
517349cc55cSDimitry Andric     ProcessRootRegion(frontier, root_regions[i]);
51868d75effSDimitry Andric }
51968d75effSDimitry Andric 
52068d75effSDimitry Andric static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
52168d75effSDimitry Andric   while (frontier->size()) {
52268d75effSDimitry Andric     uptr next_chunk = frontier->back();
52368d75effSDimitry Andric     frontier->pop_back();
52468d75effSDimitry Andric     LsanMetadata m(next_chunk);
52568d75effSDimitry Andric     ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
52668d75effSDimitry Andric                          "HEAP", tag);
52768d75effSDimitry Andric   }
52868d75effSDimitry Andric }
52968d75effSDimitry Andric 
53068d75effSDimitry Andric // ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
53168d75effSDimitry Andric // which are reachable from it as indirectly leaked.
53268d75effSDimitry Andric static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
53368d75effSDimitry Andric   chunk = GetUserBegin(chunk);
53468d75effSDimitry Andric   LsanMetadata m(chunk);
53568d75effSDimitry Andric   if (m.allocated() && m.tag() != kReachable) {
53668d75effSDimitry Andric     ScanRangeForPointers(chunk, chunk + m.requested_size(),
53768d75effSDimitry Andric                          /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
53868d75effSDimitry Andric   }
53968d75effSDimitry Andric }
54068d75effSDimitry Andric 
541e8d8bef9SDimitry Andric static void IgnoredSuppressedCb(uptr chunk, void *arg) {
542e8d8bef9SDimitry Andric   CHECK(arg);
543e8d8bef9SDimitry Andric   chunk = GetUserBegin(chunk);
544e8d8bef9SDimitry Andric   LsanMetadata m(chunk);
545e8d8bef9SDimitry Andric   if (!m.allocated() || m.tag() == kIgnored)
546e8d8bef9SDimitry Andric     return;
547e8d8bef9SDimitry Andric 
548e8d8bef9SDimitry Andric   const InternalMmapVector<u32> &suppressed =
549e8d8bef9SDimitry Andric       *static_cast<const InternalMmapVector<u32> *>(arg);
550e8d8bef9SDimitry Andric   uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
551e8d8bef9SDimitry Andric   if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
552e8d8bef9SDimitry Andric     return;
553e8d8bef9SDimitry Andric 
554349cc55cSDimitry Andric   LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
555349cc55cSDimitry Andric                (void *)(chunk + m.requested_size()), m.requested_size());
556e8d8bef9SDimitry Andric   m.set_tag(kIgnored);
557e8d8bef9SDimitry Andric }
558e8d8bef9SDimitry Andric 
55968d75effSDimitry Andric // ForEachChunk callback. If chunk is marked as ignored, adds its address to
56068d75effSDimitry Andric // frontier.
56168d75effSDimitry Andric static void CollectIgnoredCb(uptr chunk, void *arg) {
56268d75effSDimitry Andric   CHECK(arg);
56368d75effSDimitry Andric   chunk = GetUserBegin(chunk);
56468d75effSDimitry Andric   LsanMetadata m(chunk);
56568d75effSDimitry Andric   if (m.allocated() && m.tag() == kIgnored) {
566349cc55cSDimitry Andric     LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
567349cc55cSDimitry Andric                  (void *)(chunk + m.requested_size()), m.requested_size());
56868d75effSDimitry Andric     reinterpret_cast<Frontier *>(arg)->push_back(chunk);
56968d75effSDimitry Andric   }
57068d75effSDimitry Andric }
57168d75effSDimitry Andric 
// Sets the appropriate tag on each chunk. Phase order matters: each flood
// fill consumes the frontier built by the Process* calls before it.
//   1. Seed the frontier with ignored/suppressed chunks, globals, thread
//      stacks/TLS and user root regions, then flood-fill kReachable.
//   2. Re-scan for platform-specific allocations (expensive check, so it is
//      done only for chunks not already proven reachable) and flood again.
//   3. Re-tag chunks reachable only from leaked chunks as kIndirectlyLeaked.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
                              Frontier *frontier) {
  // Chunks allocated under a previously-matched suppression stack become
  // kIgnored up front, so they seed the reachable set and are never reported.
  const InternalMmapVector<u32> &suppressed_stacks =
      GetSuppressionContext()->GetSortedSuppressedStacks();
  if (!suppressed_stacks.empty()) {
    ForEachChunk(IgnoredSuppressedCb,
                 const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
  }
  ForEachChunk(CollectIgnoredCb, frontier);
  ProcessGlobalRegions(frontier);
  ProcessThreads(suspended_threads, frontier);
  ProcessRootRegions(frontier);
  FloodFillTag(frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(frontier);
  FloodFillTag(frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}
59968d75effSDimitry Andric 
60068d75effSDimitry Andric // ForEachChunk callback. Resets the tags to pre-leak-check state.
60168d75effSDimitry Andric static void ResetTagsCb(uptr chunk, void *arg) {
60268d75effSDimitry Andric   (void)arg;
60368d75effSDimitry Andric   chunk = GetUserBegin(chunk);
60468d75effSDimitry Andric   LsanMetadata m(chunk);
60568d75effSDimitry Andric   if (m.allocated() && m.tag() != kIgnored)
60668d75effSDimitry Andric     m.set_tag(kDirectlyLeaked);
60768d75effSDimitry Andric }
60868d75effSDimitry Andric 
60968d75effSDimitry Andric // ForEachChunk callback. Aggregates information about unreachable chunks into
61068d75effSDimitry Andric // a LeakReport.
61168d75effSDimitry Andric static void CollectLeaksCb(uptr chunk, void *arg) {
61268d75effSDimitry Andric   CHECK(arg);
613*0eae32dcSDimitry Andric   LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
61468d75effSDimitry Andric   chunk = GetUserBegin(chunk);
61568d75effSDimitry Andric   LsanMetadata m(chunk);
616*0eae32dcSDimitry Andric   if (!m.allocated())
617*0eae32dcSDimitry Andric     return;
618*0eae32dcSDimitry Andric   if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
619*0eae32dcSDimitry Andric     leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
62068d75effSDimitry Andric }
62168d75effSDimitry Andric 
622e8d8bef9SDimitry Andric void LeakSuppressionContext::PrintMatchedSuppressions() {
62368d75effSDimitry Andric   InternalMmapVector<Suppression *> matched;
624e8d8bef9SDimitry Andric   context.GetMatched(&matched);
62568d75effSDimitry Andric   if (!matched.size())
62668d75effSDimitry Andric     return;
62768d75effSDimitry Andric   const char *line = "-----------------------------------------------------";
62868d75effSDimitry Andric   Printf("%s\n", line);
62968d75effSDimitry Andric   Printf("Suppressions used:\n");
63068d75effSDimitry Andric   Printf("  count      bytes template\n");
631e8d8bef9SDimitry Andric   for (uptr i = 0; i < matched.size(); i++) {
632e8d8bef9SDimitry Andric     Printf("%7zu %10zu %s\n",
633e8d8bef9SDimitry Andric            static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
634e8d8bef9SDimitry Andric            matched[i]->weight, matched[i]->templ);
635e8d8bef9SDimitry Andric   }
63668d75effSDimitry Andric   Printf("%s\n\n", line);
63768d75effSDimitry Andric }
63868d75effSDimitry Andric 
63968d75effSDimitry Andric static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
64068d75effSDimitry Andric   const InternalMmapVector<tid_t> &suspended_threads =
64168d75effSDimitry Andric       *(const InternalMmapVector<tid_t> *)arg;
64268d75effSDimitry Andric   if (tctx->status == ThreadStatusRunning) {
643e8d8bef9SDimitry Andric     uptr i = InternalLowerBound(suspended_threads, tctx->os_id);
64468d75effSDimitry Andric     if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
645349cc55cSDimitry Andric       Report(
646349cc55cSDimitry Andric           "Running thread %llu was not suspended. False leaks are possible.\n",
64768d75effSDimitry Andric           tctx->os_id);
64868d75effSDimitry Andric   }
64968d75effSDimitry Andric }
65068d75effSDimitry Andric 
#  if SANITIZER_FUCHSIA

// Fuchsia provides a libc interface that guarantees all threads are
// covered, and SuspendedThreadList is never really used.
static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}

#  else  // !SANITIZER_FUCHSIA

// Cross-checks the thread registry against the set of threads the
// stop-the-world machinery actually suspended, warning (via
// ReportIfNotSuspended) about any running thread that was missed.
static void ReportUnsuspendedThreads(
    const SuspendedThreadsList &suspended_threads) {
  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
    threads[i] = suspended_threads.GetThreadID(i);

  // Sorted so ReportIfNotSuspended can use InternalLowerBound.
  Sort(threads.data(), threads.size());

  GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      &ReportIfNotSuspended, &threads);
}

#  endif  // !SANITIZER_FUCHSIA
6725ffd83dbSDimitry Andric 
// Callback invoked by LockStuffAndStopTheWorld with all other threads
// suspended. Classifies every heap chunk, collects the still-leaked ones
// into param->leaks, and resets tags for the next check. param->success is
// only set at the end, so the caller can detect an aborted callback.
static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ReportUnsuspendedThreads(suspended_threads);
  ClassifyAllChunks(suspended_threads, &param->frontier);
  ForEachChunk(CollectLeaksCb, &param->leaks);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}
68668d75effSDimitry Andric 
687e8d8bef9SDimitry Andric static bool PrintResults(LeakReport &report) {
688e8d8bef9SDimitry Andric   uptr unsuppressed_count = report.UnsuppressedLeakCount();
689e8d8bef9SDimitry Andric   if (unsuppressed_count) {
690e8d8bef9SDimitry Andric     Decorator d;
691e8d8bef9SDimitry Andric     Printf(
692e8d8bef9SDimitry Andric         "\n"
693e8d8bef9SDimitry Andric         "================================================================="
694e8d8bef9SDimitry Andric         "\n");
695e8d8bef9SDimitry Andric     Printf("%s", d.Error());
696e8d8bef9SDimitry Andric     Report("ERROR: LeakSanitizer: detected memory leaks\n");
697e8d8bef9SDimitry Andric     Printf("%s", d.Default());
698e8d8bef9SDimitry Andric     report.ReportTopLeaks(flags()->max_leaks);
699e8d8bef9SDimitry Andric   }
700e8d8bef9SDimitry Andric   if (common_flags()->print_suppressions)
701e8d8bef9SDimitry Andric     GetSuppressionContext()->PrintMatchedSuppressions();
702e8d8bef9SDimitry Andric   if (unsuppressed_count > 0) {
703e8d8bef9SDimitry Andric     report.PrintSummary();
704e8d8bef9SDimitry Andric     return true;
705e8d8bef9SDimitry Andric   }
706e8d8bef9SDimitry Andric   return false;
707e8d8bef9SDimitry Andric }
708e8d8bef9SDimitry Andric 
// Performs one leak check, possibly rerunning it when new suppressed stacks
// are discovered (to make sure they do not hold indirect leaks). Returns
// true iff unsuppressed leaks were reported. Dies if the stop-the-world
// callback did not complete.
static bool CheckForLeaks() {
  // Respect a user-provided __lsan_is_turned_off() override; the symbol is
  // weak, so check its address before calling.
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return false;
  // Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
  // suppressions. However if a stack id was previously suppressed, it should be
  // suppressed in future checks as well.
  for (int i = 0;; ++i) {
    EnsureMainThreadIDIsCorrect();
    CheckForLeaksParam param;
    LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
    if (!param.success) {
      Report("LeakSanitizer has encountered a fatal error.\n");
      Report(
          "HINT: For debugging, try setting environment variable "
          "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
      Report(
          "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
          "etc)\n");
      Die();
    }
    LeakReport leak_report;
    leak_report.AddLeakedChunks(param.leaks);

    // No new suppressions stacks, so rerun will not help and we can report.
    if (!leak_report.ApplySuppressions())
      return PrintResults(leak_report);

    // No indirect leaks to report, so we are done here.
    if (!leak_report.IndirectUnsuppressedLeakCount())
      return PrintResults(leak_report);

    // Bound the number of reruns to guarantee termination.
    if (i >= 8) {
      Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
      return PrintResults(leak_report);
    }

    // We found a new previously unseen suppressed call stack. Rerun to make
    // sure it does not hold indirect leaks.
    VReport(1, "Rerun with %zu suppressed stacks.",
            GetSuppressionContext()->GetSortedSuppressedStacks().size());
  }
}
75168d75effSDimitry Andric 
// Latched by DoLeakCheck(): true once the at-exit check has reported leaks.
static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }
75468d75effSDimitry Andric 
// Runs the full leak check exactly once per process; later calls are no-ops.
// On reported leaks, records the fact and delegates to HandleLeaks() for
// platform-specific handling.
void DoLeakCheck() {
  Lock l(&global_mutex);
  static bool already_done;  // guarded by global_mutex
  if (already_done)
    return;
  already_done = true;
  has_reported_leaks = CheckForLeaks();
  if (has_reported_leaks)
    HandleLeaks();
}
76568d75effSDimitry Andric 
76668d75effSDimitry Andric static int DoRecoverableLeakCheck() {
767349cc55cSDimitry Andric   Lock l(&global_mutex);
76868d75effSDimitry Andric   bool have_leaks = CheckForLeaks();
76968d75effSDimitry Andric   return have_leaks ? 1 : 0;
77068d75effSDimitry Andric }
77168d75effSDimitry Andric 
// Convenience wrapper that discards the recoverable check's result.
void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
77368d75effSDimitry Andric 
77468d75effSDimitry Andric ///// LeakReport implementation. /////
77568d75effSDimitry Andric 
// A hard limit on the number of distinct (stack, directness) leak buckets,
// to avoid quadratic complexity in LeakReport::AddLeakedChunks()'s linear
// dedup scan. We don't expect to ever see this many leaks in real-world
// applications.
// FIXME: Get rid of this limit by moving logic into DedupLeaks.
const uptr kMaxLeaksConsidered = 5000;
78168d75effSDimitry Andric 
782*0eae32dcSDimitry Andric void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
783*0eae32dcSDimitry Andric   for (const LeakedChunk &leak : chunks) {
784*0eae32dcSDimitry Andric     uptr chunk = leak.chunk;
785*0eae32dcSDimitry Andric     u32 stack_trace_id = leak.stack_trace_id;
786*0eae32dcSDimitry Andric     uptr leaked_size = leak.leaked_size;
787*0eae32dcSDimitry Andric     ChunkTag tag = leak.tag;
78868d75effSDimitry Andric     CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
789349cc55cSDimitry Andric 
790349cc55cSDimitry Andric     if (u32 resolution = flags()->resolution) {
791349cc55cSDimitry Andric       StackTrace stack = StackDepotGet(stack_trace_id);
792349cc55cSDimitry Andric       stack.size = Min(stack.size, resolution);
793349cc55cSDimitry Andric       stack_trace_id = StackDepotPut(stack);
794349cc55cSDimitry Andric     }
795349cc55cSDimitry Andric 
79668d75effSDimitry Andric     bool is_directly_leaked = (tag == kDirectlyLeaked);
79768d75effSDimitry Andric     uptr i;
79868d75effSDimitry Andric     for (i = 0; i < leaks_.size(); i++) {
79968d75effSDimitry Andric       if (leaks_[i].stack_trace_id == stack_trace_id &&
80068d75effSDimitry Andric           leaks_[i].is_directly_leaked == is_directly_leaked) {
80168d75effSDimitry Andric         leaks_[i].hit_count++;
80268d75effSDimitry Andric         leaks_[i].total_size += leaked_size;
80368d75effSDimitry Andric         break;
80468d75effSDimitry Andric       }
80568d75effSDimitry Andric     }
80668d75effSDimitry Andric     if (i == leaks_.size()) {
807*0eae32dcSDimitry Andric       if (leaks_.size() == kMaxLeaksConsidered)
808*0eae32dcSDimitry Andric         return;
809*0eae32dcSDimitry Andric       Leak leak = {next_id_++,         /* hit_count */ 1,
810*0eae32dcSDimitry Andric                    leaked_size,        stack_trace_id,
81168d75effSDimitry Andric                    is_directly_leaked, /* is_suppressed */ false};
81268d75effSDimitry Andric       leaks_.push_back(leak);
81368d75effSDimitry Andric     }
81468d75effSDimitry Andric     if (flags()->report_objects) {
81568d75effSDimitry Andric       LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
81668d75effSDimitry Andric       leaked_objects_.push_back(obj);
81768d75effSDimitry Andric     }
81868d75effSDimitry Andric   }
819*0eae32dcSDimitry Andric }
82068d75effSDimitry Andric 
82168d75effSDimitry Andric static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
82268d75effSDimitry Andric   if (leak1.is_directly_leaked == leak2.is_directly_leaked)
82368d75effSDimitry Andric     return leak1.total_size > leak2.total_size;
82468d75effSDimitry Andric   else
82568d75effSDimitry Andric     return leak1.is_directly_leaked;
82668d75effSDimitry Andric }
82768d75effSDimitry Andric 
82868d75effSDimitry Andric void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
82968d75effSDimitry Andric   CHECK(leaks_.size() <= kMaxLeaksConsidered);
83068d75effSDimitry Andric   Printf("\n");
83168d75effSDimitry Andric   if (leaks_.size() == kMaxLeaksConsidered)
832*0eae32dcSDimitry Andric     Printf(
833*0eae32dcSDimitry Andric         "Too many leaks! Only the first %zu leaks encountered will be "
83468d75effSDimitry Andric         "reported.\n",
83568d75effSDimitry Andric         kMaxLeaksConsidered);
83668d75effSDimitry Andric 
83768d75effSDimitry Andric   uptr unsuppressed_count = UnsuppressedLeakCount();
83868d75effSDimitry Andric   if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
83968d75effSDimitry Andric     Printf("The %zu top leak(s):\n", num_leaks_to_report);
84068d75effSDimitry Andric   Sort(leaks_.data(), leaks_.size(), &LeakComparator);
84168d75effSDimitry Andric   uptr leaks_reported = 0;
84268d75effSDimitry Andric   for (uptr i = 0; i < leaks_.size(); i++) {
843*0eae32dcSDimitry Andric     if (leaks_[i].is_suppressed)
844*0eae32dcSDimitry Andric       continue;
84568d75effSDimitry Andric     PrintReportForLeak(i);
84668d75effSDimitry Andric     leaks_reported++;
847*0eae32dcSDimitry Andric     if (leaks_reported == num_leaks_to_report)
848*0eae32dcSDimitry Andric       break;
84968d75effSDimitry Andric   }
85068d75effSDimitry Andric   if (leaks_reported < unsuppressed_count) {
85168d75effSDimitry Andric     uptr remaining = unsuppressed_count - leaks_reported;
85268d75effSDimitry Andric     Printf("Omitting %zu more leak(s).\n", remaining);
85368d75effSDimitry Andric   }
85468d75effSDimitry Andric }
85568d75effSDimitry Andric 
85668d75effSDimitry Andric void LeakReport::PrintReportForLeak(uptr index) {
85768d75effSDimitry Andric   Decorator d;
85868d75effSDimitry Andric   Printf("%s", d.Leak());
85968d75effSDimitry Andric   Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
86068d75effSDimitry Andric          leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
86168d75effSDimitry Andric          leaks_[index].total_size, leaks_[index].hit_count);
86268d75effSDimitry Andric   Printf("%s", d.Default());
86368d75effSDimitry Andric 
864349cc55cSDimitry Andric   CHECK(leaks_[index].stack_trace_id);
865349cc55cSDimitry Andric   StackDepotGet(leaks_[index].stack_trace_id).Print();
86668d75effSDimitry Andric 
86768d75effSDimitry Andric   if (flags()->report_objects) {
86868d75effSDimitry Andric     Printf("Objects leaked above:\n");
86968d75effSDimitry Andric     PrintLeakedObjectsForLeak(index);
87068d75effSDimitry Andric     Printf("\n");
87168d75effSDimitry Andric   }
87268d75effSDimitry Andric }
87368d75effSDimitry Andric 
87468d75effSDimitry Andric void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
87568d75effSDimitry Andric   u32 leak_id = leaks_[index].id;
87668d75effSDimitry Andric   for (uptr j = 0; j < leaked_objects_.size(); j++) {
87768d75effSDimitry Andric     if (leaked_objects_[j].leak_id == leak_id)
878349cc55cSDimitry Andric       Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,
87968d75effSDimitry Andric              leaked_objects_[j].size);
88068d75effSDimitry Andric   }
88168d75effSDimitry Andric }
88268d75effSDimitry Andric 
88368d75effSDimitry Andric void LeakReport::PrintSummary() {
88468d75effSDimitry Andric   CHECK(leaks_.size() <= kMaxLeaksConsidered);
88568d75effSDimitry Andric   uptr bytes = 0, allocations = 0;
88668d75effSDimitry Andric   for (uptr i = 0; i < leaks_.size(); i++) {
887*0eae32dcSDimitry Andric     if (leaks_[i].is_suppressed)
888*0eae32dcSDimitry Andric       continue;
88968d75effSDimitry Andric     bytes += leaks_[i].total_size;
89068d75effSDimitry Andric     allocations += leaks_[i].hit_count;
89168d75effSDimitry Andric   }
892fe6060f1SDimitry Andric   InternalScopedString summary;
89368d75effSDimitry Andric   summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
89468d75effSDimitry Andric                  allocations);
89568d75effSDimitry Andric   ReportErrorSummary(summary.data());
89668d75effSDimitry Andric }
89768d75effSDimitry Andric 
898e8d8bef9SDimitry Andric uptr LeakReport::ApplySuppressions() {
899e8d8bef9SDimitry Andric   LeakSuppressionContext *suppressions = GetSuppressionContext();
900e8d8bef9SDimitry Andric   uptr new_suppressions = false;
90168d75effSDimitry Andric   for (uptr i = 0; i < leaks_.size(); i++) {
902*0eae32dcSDimitry Andric     if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
903*0eae32dcSDimitry Andric                                leaks_[i].total_size)) {
90468d75effSDimitry Andric       leaks_[i].is_suppressed = true;
905e8d8bef9SDimitry Andric       ++new_suppressions;
90668d75effSDimitry Andric     }
90768d75effSDimitry Andric   }
908e8d8bef9SDimitry Andric   return new_suppressions;
90968d75effSDimitry Andric }
91068d75effSDimitry Andric 
91168d75effSDimitry Andric uptr LeakReport::UnsuppressedLeakCount() {
91268d75effSDimitry Andric   uptr result = 0;
91368d75effSDimitry Andric   for (uptr i = 0; i < leaks_.size(); i++)
914*0eae32dcSDimitry Andric     if (!leaks_[i].is_suppressed)
915*0eae32dcSDimitry Andric       result++;
91668d75effSDimitry Andric   return result;
91768d75effSDimitry Andric }
91868d75effSDimitry Andric 
919e8d8bef9SDimitry Andric uptr LeakReport::IndirectUnsuppressedLeakCount() {
920e8d8bef9SDimitry Andric   uptr result = 0;
921e8d8bef9SDimitry Andric   for (uptr i = 0; i < leaks_.size(); i++)
922e8d8bef9SDimitry Andric     if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
923e8d8bef9SDimitry Andric       result++;
924e8d8bef9SDimitry Andric   return result;
925e8d8bef9SDimitry Andric }
926e8d8bef9SDimitry Andric 
92768d75effSDimitry Andric }  // namespace __lsan
92868d75effSDimitry Andric #else   // CAN_SANITIZE_LEAKS
namespace __lsan {
// Stubs for configurations where leak checking is compiled out
// (CAN_SANITIZE_LEAKS == 0): every entry point becomes a no-op.
void InitCommonLsan() {}
void DoLeakCheck() {}
void DoRecoverableLeakCheckVoid() {}
void DisableInThisThread() {}
void EnableInThisThread() {}
}  // namespace __lsan
93668d75effSDimitry Andric #endif  // CAN_SANITIZE_LEAKS
93768d75effSDimitry Andric 
93868d75effSDimitry Andric using namespace __lsan;
93968d75effSDimitry Andric 
94068d75effSDimitry Andric extern "C" {
94168d75effSDimitry Andric SANITIZER_INTERFACE_ATTRIBUTE
94268d75effSDimitry Andric void __lsan_ignore_object(const void *p) {
94368d75effSDimitry Andric #if CAN_SANITIZE_LEAKS
94468d75effSDimitry Andric   if (!common_flags()->detect_leaks)
94568d75effSDimitry Andric     return;
94668d75effSDimitry Andric   // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
94768d75effSDimitry Andric   // locked.
948349cc55cSDimitry Andric   Lock l(&global_mutex);
94968d75effSDimitry Andric   IgnoreObjectResult res = IgnoreObjectLocked(p);
95068d75effSDimitry Andric   if (res == kIgnoreObjectInvalid)
95168d75effSDimitry Andric     VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
95268d75effSDimitry Andric   if (res == kIgnoreObjectAlreadyIgnored)
953*0eae32dcSDimitry Andric     VReport(1,
954*0eae32dcSDimitry Andric             "__lsan_ignore_object(): "
955*0eae32dcSDimitry Andric             "heap object at %p is already being ignored\n",
956*0eae32dcSDimitry Andric             p);
95768d75effSDimitry Andric   if (res == kIgnoreObjectSuccess)
95868d75effSDimitry Andric     VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
95968d75effSDimitry Andric #endif  // CAN_SANITIZE_LEAKS
96068d75effSDimitry Andric }
96168d75effSDimitry Andric 
96268d75effSDimitry Andric SANITIZER_INTERFACE_ATTRIBUTE
96368d75effSDimitry Andric void __lsan_register_root_region(const void *begin, uptr size) {
96468d75effSDimitry Andric #if CAN_SANITIZE_LEAKS
965349cc55cSDimitry Andric   Lock l(&global_mutex);
96668d75effSDimitry Andric   RootRegion region = {reinterpret_cast<uptr>(begin), size};
967349cc55cSDimitry Andric   root_regions.push_back(region);
968349cc55cSDimitry Andric   VReport(1, "Registered root region at %p of size %zu\n", begin, size);
96968d75effSDimitry Andric #endif  // CAN_SANITIZE_LEAKS
97068d75effSDimitry Andric }
97168d75effSDimitry Andric 
97268d75effSDimitry Andric SANITIZER_INTERFACE_ATTRIBUTE
97368d75effSDimitry Andric void __lsan_unregister_root_region(const void *begin, uptr size) {
97468d75effSDimitry Andric #if CAN_SANITIZE_LEAKS
975349cc55cSDimitry Andric   Lock l(&global_mutex);
97668d75effSDimitry Andric   bool removed = false;
977349cc55cSDimitry Andric   for (uptr i = 0; i < root_regions.size(); i++) {
978349cc55cSDimitry Andric     RootRegion region = root_regions[i];
97968d75effSDimitry Andric     if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
98068d75effSDimitry Andric       removed = true;
981349cc55cSDimitry Andric       uptr last_index = root_regions.size() - 1;
982349cc55cSDimitry Andric       root_regions[i] = root_regions[last_index];
983349cc55cSDimitry Andric       root_regions.pop_back();
984349cc55cSDimitry Andric       VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
98568d75effSDimitry Andric       break;
98668d75effSDimitry Andric     }
98768d75effSDimitry Andric   }
98868d75effSDimitry Andric   if (!removed) {
98968d75effSDimitry Andric     Report(
990349cc55cSDimitry Andric         "__lsan_unregister_root_region(): region at %p of size %zu has not "
99168d75effSDimitry Andric         "been registered.\n",
99268d75effSDimitry Andric         begin, size);
99368d75effSDimitry Andric     Die();
99468d75effSDimitry Andric   }
99568d75effSDimitry Andric #endif  // CAN_SANITIZE_LEAKS
99668d75effSDimitry Andric }
99768d75effSDimitry Andric 
// Public interface: enters a per-thread "disabled" region; expected to be
// balanced by a later __lsan_enable(). Forwards to the internal per-thread
// counter when leak checking is compiled in; no-op otherwise.
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}
100468d75effSDimitry Andric 
// Public interface: leaves a per-thread "disabled" region opened by
// __lsan_disable(). No-op when leak checking is compiled out.
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}
101168d75effSDimitry Andric 
// Public interface: triggers the once-per-process leak check (see
// DoLeakCheck). Honors the detect_leaks common flag.
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}
101968d75effSDimitry Andric 
// Public interface: runs a repeatable leak check. Returns 1 if leaks were
// found, 0 otherwise (including when detection is disabled or unsupported).
SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
  return 0;
}
102868d75effSDimitry Andric 
// Weak default: applications may override this hook at link time to supply
// default LSAN_OPTIONS-style flags.
SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
  return "";
}
103268d75effSDimitry Andric 
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Fallback definitions of the user-overridable hooks for toolchains without
// weak-hook support: leak checking stays on, no extra suppressions.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE int
__lsan_is_turned_off() {
  return 0;
}

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char *
__lsan_default_suppressions() {
  return "";
}
#endif
104468d75effSDimitry Andric }  // extern "C"
1045