xref: /freebsd-src/contrib/llvm-project/compiler-rt/lib/lsan/lsan_common.cpp (revision 5ffd83dbcc34f10e07f6d3e968ae6365869615f4)
168d75effSDimitry Andric //=-- lsan_common.cpp -----------------------------------------------------===//
268d75effSDimitry Andric //
368d75effSDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
468d75effSDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
568d75effSDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
668d75effSDimitry Andric //
768d75effSDimitry Andric //===----------------------------------------------------------------------===//
868d75effSDimitry Andric //
968d75effSDimitry Andric // This file is a part of LeakSanitizer.
1068d75effSDimitry Andric // Implementation of common leak checking functionality.
1168d75effSDimitry Andric //
1268d75effSDimitry Andric //===----------------------------------------------------------------------===//
1368d75effSDimitry Andric 
1468d75effSDimitry Andric #include "lsan_common.h"
1568d75effSDimitry Andric 
1668d75effSDimitry Andric #include "sanitizer_common/sanitizer_common.h"
1768d75effSDimitry Andric #include "sanitizer_common/sanitizer_flag_parser.h"
1868d75effSDimitry Andric #include "sanitizer_common/sanitizer_flags.h"
1968d75effSDimitry Andric #include "sanitizer_common/sanitizer_placement_new.h"
2068d75effSDimitry Andric #include "sanitizer_common/sanitizer_procmaps.h"
2168d75effSDimitry Andric #include "sanitizer_common/sanitizer_report_decorator.h"
2268d75effSDimitry Andric #include "sanitizer_common/sanitizer_stackdepot.h"
2368d75effSDimitry Andric #include "sanitizer_common/sanitizer_stacktrace.h"
2468d75effSDimitry Andric #include "sanitizer_common/sanitizer_suppressions.h"
2568d75effSDimitry Andric #include "sanitizer_common/sanitizer_thread_registry.h"
2668d75effSDimitry Andric #include "sanitizer_common/sanitizer_tls_get_addr.h"
2768d75effSDimitry Andric 
28*5ffd83dbSDimitry Andric extern "C" const char *__lsan_current_stage = "unknown";
29*5ffd83dbSDimitry Andric 
3068d75effSDimitry Andric #if CAN_SANITIZE_LEAKS
3168d75effSDimitry Andric namespace __lsan {
3268d75effSDimitry Andric 
3368d75effSDimitry Andric // This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
3468d75effSDimitry Andric // also to protect the global list of root regions.
3568d75effSDimitry Andric BlockingMutex global_mutex(LINKER_INITIALIZED);
3668d75effSDimitry Andric 
3768d75effSDimitry Andric Flags lsan_flags;
3868d75effSDimitry Andric 
39*5ffd83dbSDimitry Andric 
4068d75effSDimitry Andric void DisableCounterUnderflow() {
4168d75effSDimitry Andric   if (common_flags()->detect_leaks) {
4268d75effSDimitry Andric     Report("Unmatched call to __lsan_enable().\n");
4368d75effSDimitry Andric     Die();
4468d75effSDimitry Andric   }
4568d75effSDimitry Andric }
4668d75effSDimitry Andric 
// Resets every LSan flag to the default value declared in lsan_flags.inc.
// The X-macro include expands to one assignment per flag.
void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
}
5268d75effSDimitry Andric 
// Registers every LSan flag declared in lsan_flags.inc with |parser|, binding
// each to the corresponding member of |f| so parsing updates it in place.
void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "lsan_flags.inc"
#undef LSAN_FLAG
}
5968d75effSDimitry Andric 
// Verbose-logging helpers, gated on the log_pointers / log_threads runtime
// flags. Wrapped in do/while(0) so they behave as single statements.
#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0)

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0)

// Static storage for the global suppression context; constructed with
// placement new in InitializeSuppressions() (see below).
ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
// The only suppression type LSan recognizes ("leak:<pattern>").
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };
// Built-in suppressions that are always applied, in addition to any
// user-provided ones.
static const char kStdSuppressions[] =
#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
  // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
  // definition.
  "leak:*pthread_exit*\n"
#endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
#if SANITIZER_MAC
  // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
  "leak:*_os_trace*\n"
#endif
  // TLS leak in some glibc versions, described in
  // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
  "leak:*tls_get_addr*\n";
8768d75effSDimitry Andric 
// Constructs the global suppression context in static storage (placement new
// into suppression_placeholder) and populates it, in order, from: the file
// named by the "suppressions" flag, the user's weak
// __lsan_default_suppressions() hook (if defined), and the built-in
// kStdSuppressions list. Must be called exactly once.
void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);  // Double initialization is a bug.
  suppression_ctx = new (suppression_placeholder)
      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
  suppression_ctx->ParseFromFile(flags()->suppressions);
  // Weak-symbol check: only call the hook if the user defined it.
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
  suppression_ctx->Parse(kStdSuppressions);
}
9768d75effSDimitry Andric 
9868d75effSDimitry Andric static SuppressionContext *GetSuppressionContext() {
9968d75effSDimitry Andric   CHECK(suppression_ctx);
10068d75effSDimitry Andric   return suppression_ctx;
10168d75effSDimitry Andric }
10268d75effSDimitry Andric 
// Global list of user-registered root regions; lazily constructed in
// InitializeRootRegions(). Per the comment on global_mutex above, access to
// this list is protected by global_mutex.
static InternalMmapVector<RootRegion> *root_regions;

// Read-only accessor for the root region list (null until
// InitializeRootRegions() has run).
InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }

// Constructs the root region list via placement new into static storage, so
// no heap allocation is needed. Must be called exactly once.
void InitializeRootRegions() {
  CHECK(!root_regions);
  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
  root_regions = new (placeholder) InternalMmapVector<RootRegion>();
}
11268d75effSDimitry Andric 
11368d75effSDimitry Andric const char *MaybeCallLsanDefaultOptions() {
11468d75effSDimitry Andric   return (&__lsan_default_options) ? __lsan_default_options() : "";
11568d75effSDimitry Andric }
11668d75effSDimitry Andric 
11768d75effSDimitry Andric void InitCommonLsan() {
11868d75effSDimitry Andric   InitializeRootRegions();
11968d75effSDimitry Andric   if (common_flags()->detect_leaks) {
12068d75effSDimitry Andric     // Initialization which can fail or print warnings should only be done if
12168d75effSDimitry Andric     // LSan is actually enabled.
12268d75effSDimitry Andric     InitializeSuppressions();
12368d75effSDimitry Andric     InitializePlatformSpecificModules();
12468d75effSDimitry Andric   }
12568d75effSDimitry Andric }
12668d75effSDimitry Andric 
12768d75effSDimitry Andric class Decorator: public __sanitizer::SanitizerCommonDecorator {
12868d75effSDimitry Andric  public:
12968d75effSDimitry Andric   Decorator() : SanitizerCommonDecorator() { }
13068d75effSDimitry Andric   const char *Error() { return Red(); }
13168d75effSDimitry Andric   const char *Leak() { return Blue(); }
13268d75effSDimitry Andric };
13368d75effSDimitry Andric 
13468d75effSDimitry Andric static inline bool CanBeAHeapPointer(uptr p) {
13568d75effSDimitry Andric   // Since our heap is located in mmap-ed memory, we can assume a sensible lower
13668d75effSDimitry Andric   // bound on heap addresses.
13768d75effSDimitry Andric   const uptr kMinAddress = 4 * 4096;
13868d75effSDimitry Andric   if (p < kMinAddress) return false;
13968d75effSDimitry Andric #if defined(__x86_64__)
14068d75effSDimitry Andric   // Accept only canonical form user-space addresses.
14168d75effSDimitry Andric   return ((p >> 47) == 0);
14268d75effSDimitry Andric #elif defined(__mips64)
14368d75effSDimitry Andric   return ((p >> 40) == 0);
14468d75effSDimitry Andric #elif defined(__aarch64__)
14568d75effSDimitry Andric   unsigned runtimeVMA =
14668d75effSDimitry Andric     (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
14768d75effSDimitry Andric   return ((p >> runtimeVMA) == 0);
14868d75effSDimitry Andric #else
14968d75effSDimitry Andric   return true;
15068d75effSDimitry Andric #endif
15168d75effSDimitry Andric }
15268d75effSDimitry Andric 
15368d75effSDimitry Andric // Scans the memory range, looking for byte patterns that point into allocator
15468d75effSDimitry Andric // chunks. Marks those chunks with |tag| and adds them to |frontier|.
15568d75effSDimitry Andric // There are two usage modes for this function: finding reachable chunks
15668d75effSDimitry Andric // (|tag| = kReachable) and finding indirectly leaked chunks
15768d75effSDimitry Andric // (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
15868d75effSDimitry Andric // so |frontier| = 0.
15968d75effSDimitry Andric void ScanRangeForPointers(uptr begin, uptr end,
16068d75effSDimitry Andric                           Frontier *frontier,
16168d75effSDimitry Andric                           const char *region_type, ChunkTag tag) {
16268d75effSDimitry Andric   CHECK(tag == kReachable || tag == kIndirectlyLeaked);
16368d75effSDimitry Andric   const uptr alignment = flags()->pointer_alignment();
16468d75effSDimitry Andric   LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
16568d75effSDimitry Andric   uptr pp = begin;
16668d75effSDimitry Andric   if (pp % alignment)
16768d75effSDimitry Andric     pp = pp + alignment - pp % alignment;
16868d75effSDimitry Andric   for (; pp + sizeof(void *) <= end; pp += alignment) {
16968d75effSDimitry Andric     void *p = *reinterpret_cast<void **>(pp);
17068d75effSDimitry Andric     if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
17168d75effSDimitry Andric     uptr chunk = PointsIntoChunk(p);
17268d75effSDimitry Andric     if (!chunk) continue;
17368d75effSDimitry Andric     // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
17468d75effSDimitry Andric     if (chunk == begin) continue;
17568d75effSDimitry Andric     LsanMetadata m(chunk);
17668d75effSDimitry Andric     if (m.tag() == kReachable || m.tag() == kIgnored) continue;
17768d75effSDimitry Andric 
17868d75effSDimitry Andric     // Do this check relatively late so we can log only the interesting cases.
17968d75effSDimitry Andric     if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
18068d75effSDimitry Andric       LOG_POINTERS(
18168d75effSDimitry Andric           "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
18268d75effSDimitry Andric           "%zu.\n",
18368d75effSDimitry Andric           pp, p, chunk, chunk + m.requested_size(), m.requested_size());
18468d75effSDimitry Andric       continue;
18568d75effSDimitry Andric     }
18668d75effSDimitry Andric 
18768d75effSDimitry Andric     m.set_tag(tag);
18868d75effSDimitry Andric     LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
18968d75effSDimitry Andric                  chunk, chunk + m.requested_size(), m.requested_size());
19068d75effSDimitry Andric     if (frontier)
19168d75effSDimitry Andric       frontier->push_back(chunk);
19268d75effSDimitry Andric   }
19368d75effSDimitry Andric }
19468d75effSDimitry Andric 
19568d75effSDimitry Andric // Scans a global range for pointers
19668d75effSDimitry Andric void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
19768d75effSDimitry Andric   uptr allocator_begin = 0, allocator_end = 0;
19868d75effSDimitry Andric   GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
19968d75effSDimitry Andric   if (begin <= allocator_begin && allocator_begin < end) {
20068d75effSDimitry Andric     CHECK_LE(allocator_begin, allocator_end);
20168d75effSDimitry Andric     CHECK_LE(allocator_end, end);
20268d75effSDimitry Andric     if (begin < allocator_begin)
20368d75effSDimitry Andric       ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
20468d75effSDimitry Andric                            kReachable);
20568d75effSDimitry Andric     if (allocator_end < end)
20668d75effSDimitry Andric       ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
20768d75effSDimitry Andric   } else {
20868d75effSDimitry Andric     ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
20968d75effSDimitry Andric   }
21068d75effSDimitry Andric }
21168d75effSDimitry Andric 
21268d75effSDimitry Andric void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
21368d75effSDimitry Andric   Frontier *frontier = reinterpret_cast<Frontier *>(arg);
21468d75effSDimitry Andric   ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
21568d75effSDimitry Andric }
21668d75effSDimitry Andric 
#if SANITIZER_FUCHSIA

// Fuchsia handles all threads together with its own callback.
static void ProcessThreads(SuspendedThreadsList const &, Frontier *) {}

#else

// Scans thread data (stacks and TLS) for heap pointers.
// For each suspended thread: its register contents, its stack (truncated at
// SP when SP is inside the recorded range), its static TLS minus the
// allocator-cache area, and its dynamic TLS (DTV entries) are each scanned
// as roots, gated on the use_registers / use_stacks / use_tls flags.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  // Buffer that receives each thread's register dump; scanned below as an
  // ordinary memory range.
  InternalMmapVector<uptr> registers(suspended_threads.RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end =
      reinterpret_cast<uptr>(registers.data() + registers.size());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    PtraceRegistersStatus have_registers =
        suspended_threads.GetRegistersAndSP(i, registers.data(), &sp);
    if (have_registers != REGISTERS_AVAILABLE) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable unless
      // GetRegistersAndSP failed with ESRCH.
      if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        // Skip leading inaccessible (guard) pages so the scan below does not
        // fault.
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, stack_begin, stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      // Also scan any extra ranges associated with this thread (e.g. ASan
      // fake stacks) via the callback above.
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      if (tls_begin) {
        LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
        // If the tls and cache ranges don't overlap, scan full tls range,
        // otherwise, only scan the non-overlapping portions
        if (cache_begin == cache_end || tls_end < cache_begin ||
            tls_begin > cache_end) {
          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
        } else {
          if (tls_begin < cache_begin)
            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                                 kReachable);
          if (tls_end > cache_end)
            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                                 kReachable);
        }
      }
      if (dtls && !DTLSInDestruction(dtls)) {
        // Scan each live dynamic-TLS block recorded in the DTV.
        for (uptr j = 0; j < dtls->dtv_size; ++j) {
          uptr dtls_beg = dtls->dtv[j].beg;
          uptr dtls_end = dtls_beg + dtls->dtv[j].size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        }
      } else {
        // We are handling a thread with DTLS under destruction. Log about
        // this and continue.
        LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id);
      }
    }
  }
}

#endif  // SANITIZER_FUCHSIA
322*5ffd83dbSDimitry Andric 
32368d75effSDimitry Andric void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
32468d75effSDimitry Andric                     uptr region_begin, uptr region_end, bool is_readable) {
32568d75effSDimitry Andric   uptr intersection_begin = Max(root_region.begin, region_begin);
32668d75effSDimitry Andric   uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
32768d75effSDimitry Andric   if (intersection_begin >= intersection_end) return;
32868d75effSDimitry Andric   LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
32968d75effSDimitry Andric                root_region.begin, root_region.begin + root_region.size,
33068d75effSDimitry Andric                region_begin, region_end,
33168d75effSDimitry Andric                is_readable ? "readable" : "unreadable");
33268d75effSDimitry Andric   if (is_readable)
33368d75effSDimitry Andric     ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
33468d75effSDimitry Andric                          kReachable);
33568d75effSDimitry Andric }
33668d75effSDimitry Andric 
33768d75effSDimitry Andric static void ProcessRootRegion(Frontier *frontier,
33868d75effSDimitry Andric                               const RootRegion &root_region) {
33968d75effSDimitry Andric   MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
34068d75effSDimitry Andric   MemoryMappedSegment segment;
34168d75effSDimitry Andric   while (proc_maps.Next(&segment)) {
34268d75effSDimitry Andric     ScanRootRegion(frontier, root_region, segment.start, segment.end,
34368d75effSDimitry Andric                    segment.IsReadable());
34468d75effSDimitry Andric   }
34568d75effSDimitry Andric }
34668d75effSDimitry Andric 
34768d75effSDimitry Andric // Scans root regions for heap pointers.
34868d75effSDimitry Andric static void ProcessRootRegions(Frontier *frontier) {
34968d75effSDimitry Andric   if (!flags()->use_root_regions) return;
35068d75effSDimitry Andric   CHECK(root_regions);
35168d75effSDimitry Andric   for (uptr i = 0; i < root_regions->size(); i++) {
35268d75effSDimitry Andric     ProcessRootRegion(frontier, (*root_regions)[i]);
35368d75effSDimitry Andric   }
35468d75effSDimitry Andric }
35568d75effSDimitry Andric 
35668d75effSDimitry Andric static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
35768d75effSDimitry Andric   while (frontier->size()) {
35868d75effSDimitry Andric     uptr next_chunk = frontier->back();
35968d75effSDimitry Andric     frontier->pop_back();
36068d75effSDimitry Andric     LsanMetadata m(next_chunk);
36168d75effSDimitry Andric     ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
36268d75effSDimitry Andric                          "HEAP", tag);
36368d75effSDimitry Andric   }
36468d75effSDimitry Andric }
36568d75effSDimitry Andric 
36668d75effSDimitry Andric // ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
36768d75effSDimitry Andric // which are reachable from it as indirectly leaked.
36868d75effSDimitry Andric static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
369*5ffd83dbSDimitry Andric   __lsan_current_stage = "MarkIndirectlyLeakedCb";
37068d75effSDimitry Andric   chunk = GetUserBegin(chunk);
37168d75effSDimitry Andric   LsanMetadata m(chunk);
37268d75effSDimitry Andric   if (m.allocated() && m.tag() != kReachable) {
37368d75effSDimitry Andric     ScanRangeForPointers(chunk, chunk + m.requested_size(),
37468d75effSDimitry Andric                          /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
37568d75effSDimitry Andric   }
37668d75effSDimitry Andric }
37768d75effSDimitry Andric 
37868d75effSDimitry Andric // ForEachChunk callback. If chunk is marked as ignored, adds its address to
37968d75effSDimitry Andric // frontier.
38068d75effSDimitry Andric static void CollectIgnoredCb(uptr chunk, void *arg) {
38168d75effSDimitry Andric   CHECK(arg);
382*5ffd83dbSDimitry Andric   __lsan_current_stage = "CollectIgnoredCb";
38368d75effSDimitry Andric   chunk = GetUserBegin(chunk);
38468d75effSDimitry Andric   LsanMetadata m(chunk);
38568d75effSDimitry Andric   if (m.allocated() && m.tag() == kIgnored) {
38668d75effSDimitry Andric     LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
38768d75effSDimitry Andric                  chunk, chunk + m.requested_size(), m.requested_size());
38868d75effSDimitry Andric     reinterpret_cast<Frontier *>(arg)->push_back(chunk);
38968d75effSDimitry Andric   }
39068d75effSDimitry Andric }
39168d75effSDimitry Andric 
39268d75effSDimitry Andric static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
39368d75effSDimitry Andric   CHECK(stack_id);
39468d75effSDimitry Andric   StackTrace stack = map->Get(stack_id);
39568d75effSDimitry Andric   // The top frame is our malloc/calloc/etc. The next frame is the caller.
39668d75effSDimitry Andric   if (stack.size >= 2)
39768d75effSDimitry Andric     return stack.trace[1];
39868d75effSDimitry Andric   return 0;
39968d75effSDimitry Andric }
40068d75effSDimitry Andric 
// Parameter block threaded through ForEachChunk's void* into MarkInvalidPCCb.
struct InvalidPCParam {
  // Chunks newly marked reachable are appended here.
  Frontier *frontier;
  // Used to map a chunk's stack-trace id back to its caller PC.
  StackDepotReverseMap *stack_depot_reverse_map;
  // When true, chunks allocated from within the dynamic linker are treated
  // as reachable (see the comment above ProcessPC).
  bool skip_linker_allocations;
};
40668d75effSDimitry Andric 
40768d75effSDimitry Andric // ForEachChunk callback. If the caller pc is invalid or is within the linker,
40868d75effSDimitry Andric // mark as reachable. Called by ProcessPlatformSpecificAllocations.
40968d75effSDimitry Andric static void MarkInvalidPCCb(uptr chunk, void *arg) {
41068d75effSDimitry Andric   CHECK(arg);
41168d75effSDimitry Andric   InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
412*5ffd83dbSDimitry Andric   __lsan_current_stage = "MarkInvalidPCCb";
41368d75effSDimitry Andric   chunk = GetUserBegin(chunk);
41468d75effSDimitry Andric   LsanMetadata m(chunk);
41568d75effSDimitry Andric   if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
41668d75effSDimitry Andric     u32 stack_id = m.stack_trace_id();
41768d75effSDimitry Andric     uptr caller_pc = 0;
41868d75effSDimitry Andric     if (stack_id > 0)
41968d75effSDimitry Andric       caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
42068d75effSDimitry Andric     // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
42168d75effSDimitry Andric     // it as reachable, as we can't properly report its allocation stack anyway.
42268d75effSDimitry Andric     if (caller_pc == 0 || (param->skip_linker_allocations &&
42368d75effSDimitry Andric                            GetLinker()->containsAddress(caller_pc))) {
42468d75effSDimitry Andric       m.set_tag(kReachable);
42568d75effSDimitry Andric       param->frontier->push_back(chunk);
42668d75effSDimitry Andric     }
42768d75effSDimitry Andric   }
42868d75effSDimitry Andric }
42968d75effSDimitry Andric 
43068d75effSDimitry Andric // On Linux, treats all chunks allocated from ld-linux.so as reachable, which
43168d75effSDimitry Andric // covers dynamically allocated TLS blocks, internal dynamic loader's loaded
43268d75effSDimitry Andric // modules accounting etc.
43368d75effSDimitry Andric // Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
43468d75effSDimitry Andric // They are allocated with a __libc_memalign() call in allocate_and_init()
43568d75effSDimitry Andric // (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
43668d75effSDimitry Andric // blocks, but we can make sure they come from our own allocator by intercepting
43768d75effSDimitry Andric // __libc_memalign(). On top of that, there is no easy way to reach them. Their
43868d75effSDimitry Andric // addresses are stored in a dynamically allocated array (the DTV) which is
43968d75effSDimitry Andric // referenced from the static TLS. Unfortunately, we can't just rely on the DTV
44068d75effSDimitry Andric // being reachable from the static TLS, and the dynamic TLS being reachable from
44168d75effSDimitry Andric // the DTV. This is because the initial DTV is allocated before our interception
44268d75effSDimitry Andric // mechanism kicks in, and thus we don't recognize it as allocated memory. We
44368d75effSDimitry Andric // can't special-case it either, since we don't know its size.
44468d75effSDimitry Andric // Our solution is to include in the root set all allocations made from
44568d75effSDimitry Andric // ld-linux.so (which is where allocate_and_init() is implemented). This is
44668d75effSDimitry Andric // guaranteed to include all dynamic TLS blocks (and possibly other allocations
44768d75effSDimitry Andric // which we don't care about).
44868d75effSDimitry Andric // On all other platforms, this simply checks to ensure that the caller pc is
44968d75effSDimitry Andric // valid before reporting chunks as leaked.
45068d75effSDimitry Andric void ProcessPC(Frontier *frontier) {
45168d75effSDimitry Andric   StackDepotReverseMap stack_depot_reverse_map;
45268d75effSDimitry Andric   InvalidPCParam arg;
45368d75effSDimitry Andric   arg.frontier = frontier;
45468d75effSDimitry Andric   arg.stack_depot_reverse_map = &stack_depot_reverse_map;
45568d75effSDimitry Andric   arg.skip_linker_allocations =
45668d75effSDimitry Andric       flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
45768d75effSDimitry Andric   ForEachChunk(MarkInvalidPCCb, &arg);
45868d75effSDimitry Andric }
45968d75effSDimitry Andric 
// Sets the appropriate tag on each chunk.
// Phase order matters: first the root set (ignored chunks, globals, thread
// stacks/TLS, user root regions) is collected and flood-filled as reachable;
// then platform-specific fixups run; finally anything reachable only from
// leaked chunks is downgraded to indirectly leaked.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
                              Frontier *frontier) {
  // Seed the frontier with the root set.
  ForEachChunk(CollectIgnoredCb, frontier);
  ProcessGlobalRegions(frontier);
  ProcessThreads(suspended_threads, frontier);
  ProcessRootRegions(frontier);
  FloodFillTag(frontier, kReachable);

  // The flood fill must have drained the frontier before the next phase.
  CHECK_EQ(0, frontier->size());
  ProcessPC(frontier);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(frontier);
  FloodFillTag(frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}
48468d75effSDimitry Andric 
48568d75effSDimitry Andric // ForEachChunk callback. Resets the tags to pre-leak-check state.
48668d75effSDimitry Andric static void ResetTagsCb(uptr chunk, void *arg) {
48768d75effSDimitry Andric   (void)arg;
488*5ffd83dbSDimitry Andric   __lsan_current_stage = "ResetTagsCb";
48968d75effSDimitry Andric   chunk = GetUserBegin(chunk);
49068d75effSDimitry Andric   LsanMetadata m(chunk);
49168d75effSDimitry Andric   if (m.allocated() && m.tag() != kIgnored)
49268d75effSDimitry Andric     m.set_tag(kDirectlyLeaked);
49368d75effSDimitry Andric }
49468d75effSDimitry Andric 
49568d75effSDimitry Andric static void PrintStackTraceById(u32 stack_trace_id) {
49668d75effSDimitry Andric   CHECK(stack_trace_id);
49768d75effSDimitry Andric   StackDepotGet(stack_trace_id).Print();
49868d75effSDimitry Andric }
49968d75effSDimitry Andric 
50068d75effSDimitry Andric // ForEachChunk callback. Aggregates information about unreachable chunks into
50168d75effSDimitry Andric // a LeakReport.
50268d75effSDimitry Andric static void CollectLeaksCb(uptr chunk, void *arg) {
50368d75effSDimitry Andric   CHECK(arg);
50468d75effSDimitry Andric   LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
505*5ffd83dbSDimitry Andric   __lsan_current_stage = "CollectLeaksCb";
50668d75effSDimitry Andric   chunk = GetUserBegin(chunk);
50768d75effSDimitry Andric   LsanMetadata m(chunk);
50868d75effSDimitry Andric   if (!m.allocated()) return;
50968d75effSDimitry Andric   if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
51068d75effSDimitry Andric     u32 resolution = flags()->resolution;
51168d75effSDimitry Andric     u32 stack_trace_id = 0;
51268d75effSDimitry Andric     if (resolution > 0) {
51368d75effSDimitry Andric       StackTrace stack = StackDepotGet(m.stack_trace_id());
51468d75effSDimitry Andric       stack.size = Min(stack.size, resolution);
51568d75effSDimitry Andric       stack_trace_id = StackDepotPut(stack);
51668d75effSDimitry Andric     } else {
51768d75effSDimitry Andric       stack_trace_id = m.stack_trace_id();
51868d75effSDimitry Andric     }
51968d75effSDimitry Andric     leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
52068d75effSDimitry Andric                                 m.tag());
52168d75effSDimitry Andric   }
52268d75effSDimitry Andric }
52368d75effSDimitry Andric 
52468d75effSDimitry Andric static void PrintMatchedSuppressions() {
52568d75effSDimitry Andric   InternalMmapVector<Suppression *> matched;
52668d75effSDimitry Andric   GetSuppressionContext()->GetMatched(&matched);
52768d75effSDimitry Andric   if (!matched.size())
52868d75effSDimitry Andric     return;
52968d75effSDimitry Andric   const char *line = "-----------------------------------------------------";
53068d75effSDimitry Andric   Printf("%s\n", line);
53168d75effSDimitry Andric   Printf("Suppressions used:\n");
53268d75effSDimitry Andric   Printf("  count      bytes template\n");
53368d75effSDimitry Andric   for (uptr i = 0; i < matched.size(); i++)
53468d75effSDimitry Andric     Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
53568d75effSDimitry Andric         &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
53668d75effSDimitry Andric   Printf("%s\n\n", line);
53768d75effSDimitry Andric }
53868d75effSDimitry Andric 
53968d75effSDimitry Andric static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
54068d75effSDimitry Andric   const InternalMmapVector<tid_t> &suspended_threads =
54168d75effSDimitry Andric       *(const InternalMmapVector<tid_t> *)arg;
54268d75effSDimitry Andric   if (tctx->status == ThreadStatusRunning) {
54368d75effSDimitry Andric     uptr i = InternalLowerBound(suspended_threads, 0, suspended_threads.size(),
54468d75effSDimitry Andric                                 tctx->os_id, CompareLess<int>());
54568d75effSDimitry Andric     if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
54668d75effSDimitry Andric       Report("Running thread %d was not suspended. False leaks are possible.\n",
54768d75effSDimitry Andric              tctx->os_id);
54868d75effSDimitry Andric   }
54968d75effSDimitry Andric }
55068d75effSDimitry Andric 
551*5ffd83dbSDimitry Andric #if SANITIZER_FUCHSIA
552*5ffd83dbSDimitry Andric 
553*5ffd83dbSDimitry Andric // Fuchsia provides a libc interface that guarantees all threads are
554*5ffd83dbSDimitry Andric // covered, and SuspendedThreadList is never really used.
555*5ffd83dbSDimitry Andric static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}
556*5ffd83dbSDimitry Andric 
557*5ffd83dbSDimitry Andric #else  // !SANITIZER_FUCHSIA
558*5ffd83dbSDimitry Andric 
55968d75effSDimitry Andric static void ReportUnsuspendedThreads(
56068d75effSDimitry Andric     const SuspendedThreadsList &suspended_threads) {
56168d75effSDimitry Andric   InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
56268d75effSDimitry Andric   for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
56368d75effSDimitry Andric     threads[i] = suspended_threads.GetThreadID(i);
56468d75effSDimitry Andric 
56568d75effSDimitry Andric   Sort(threads.data(), threads.size());
56668d75effSDimitry Andric 
56768d75effSDimitry Andric   GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
56868d75effSDimitry Andric       &ReportIfNotSuspended, &threads);
56968d75effSDimitry Andric }
57068d75effSDimitry Andric 
571*5ffd83dbSDimitry Andric #endif  // !SANITIZER_FUCHSIA
572*5ffd83dbSDimitry Andric 
57368d75effSDimitry Andric static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
57468d75effSDimitry Andric                                   void *arg) {
57568d75effSDimitry Andric   CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
57668d75effSDimitry Andric   CHECK(param);
57768d75effSDimitry Andric   CHECK(!param->success);
57868d75effSDimitry Andric   ReportUnsuspendedThreads(suspended_threads);
579*5ffd83dbSDimitry Andric   ClassifyAllChunks(suspended_threads, &param->frontier);
58068d75effSDimitry Andric   ForEachChunk(CollectLeaksCb, &param->leak_report);
58168d75effSDimitry Andric   // Clean up for subsequent leak checks. This assumes we did not overwrite any
58268d75effSDimitry Andric   // kIgnored tags.
58368d75effSDimitry Andric   ForEachChunk(ResetTagsCb, nullptr);
58468d75effSDimitry Andric   param->success = true;
58568d75effSDimitry Andric }
58668d75effSDimitry Andric 
58768d75effSDimitry Andric static bool CheckForLeaks() {
58868d75effSDimitry Andric   if (&__lsan_is_turned_off && __lsan_is_turned_off())
58968d75effSDimitry Andric       return false;
59068d75effSDimitry Andric   EnsureMainThreadIDIsCorrect();
59168d75effSDimitry Andric   CheckForLeaksParam param;
59268d75effSDimitry Andric   LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
59368d75effSDimitry Andric 
59468d75effSDimitry Andric   if (!param.success) {
59568d75effSDimitry Andric     Report("LeakSanitizer has encountered a fatal error.\n");
59668d75effSDimitry Andric     Report(
59768d75effSDimitry Andric         "HINT: For debugging, try setting environment variable "
59868d75effSDimitry Andric         "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
59968d75effSDimitry Andric     Report(
60068d75effSDimitry Andric         "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
60168d75effSDimitry Andric     Die();
60268d75effSDimitry Andric   }
60368d75effSDimitry Andric   param.leak_report.ApplySuppressions();
60468d75effSDimitry Andric   uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
60568d75effSDimitry Andric   if (unsuppressed_count > 0) {
60668d75effSDimitry Andric     Decorator d;
60768d75effSDimitry Andric     Printf("\n"
60868d75effSDimitry Andric            "================================================================="
60968d75effSDimitry Andric            "\n");
61068d75effSDimitry Andric     Printf("%s", d.Error());
61168d75effSDimitry Andric     Report("ERROR: LeakSanitizer: detected memory leaks\n");
61268d75effSDimitry Andric     Printf("%s", d.Default());
61368d75effSDimitry Andric     param.leak_report.ReportTopLeaks(flags()->max_leaks);
61468d75effSDimitry Andric   }
61568d75effSDimitry Andric   if (common_flags()->print_suppressions)
61668d75effSDimitry Andric     PrintMatchedSuppressions();
61768d75effSDimitry Andric   if (unsuppressed_count > 0) {
61868d75effSDimitry Andric     param.leak_report.PrintSummary();
61968d75effSDimitry Andric     return true;
62068d75effSDimitry Andric   }
62168d75effSDimitry Andric   return false;
62268d75effSDimitry Andric }
62368d75effSDimitry Andric 
62468d75effSDimitry Andric static bool has_reported_leaks = false;
62568d75effSDimitry Andric bool HasReportedLeaks() { return has_reported_leaks; }
62668d75effSDimitry Andric 
62768d75effSDimitry Andric void DoLeakCheck() {
62868d75effSDimitry Andric   BlockingMutexLock l(&global_mutex);
62968d75effSDimitry Andric   static bool already_done;
63068d75effSDimitry Andric   if (already_done) return;
63168d75effSDimitry Andric   already_done = true;
63268d75effSDimitry Andric   has_reported_leaks = CheckForLeaks();
63368d75effSDimitry Andric   if (has_reported_leaks) HandleLeaks();
63468d75effSDimitry Andric }
63568d75effSDimitry Andric 
63668d75effSDimitry Andric static int DoRecoverableLeakCheck() {
63768d75effSDimitry Andric   BlockingMutexLock l(&global_mutex);
63868d75effSDimitry Andric   bool have_leaks = CheckForLeaks();
63968d75effSDimitry Andric   return have_leaks ? 1 : 0;
64068d75effSDimitry Andric }
64168d75effSDimitry Andric 
64268d75effSDimitry Andric void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
64368d75effSDimitry Andric 
64468d75effSDimitry Andric static Suppression *GetSuppressionForAddr(uptr addr) {
64568d75effSDimitry Andric   Suppression *s = nullptr;
64668d75effSDimitry Andric 
64768d75effSDimitry Andric   // Suppress by module name.
64868d75effSDimitry Andric   SuppressionContext *suppressions = GetSuppressionContext();
64968d75effSDimitry Andric   if (const char *module_name =
65068d75effSDimitry Andric           Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
65168d75effSDimitry Andric     if (suppressions->Match(module_name, kSuppressionLeak, &s))
65268d75effSDimitry Andric       return s;
65368d75effSDimitry Andric 
65468d75effSDimitry Andric   // Suppress by file or function name.
65568d75effSDimitry Andric   SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
65668d75effSDimitry Andric   for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
65768d75effSDimitry Andric     if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
65868d75effSDimitry Andric         suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
65968d75effSDimitry Andric       break;
66068d75effSDimitry Andric     }
66168d75effSDimitry Andric   }
66268d75effSDimitry Andric   frames->ClearAll();
66368d75effSDimitry Andric   return s;
66468d75effSDimitry Andric }
66568d75effSDimitry Andric 
66668d75effSDimitry Andric static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
66768d75effSDimitry Andric   StackTrace stack = StackDepotGet(stack_trace_id);
66868d75effSDimitry Andric   for (uptr i = 0; i < stack.size; i++) {
66968d75effSDimitry Andric     Suppression *s = GetSuppressionForAddr(
67068d75effSDimitry Andric         StackTrace::GetPreviousInstructionPc(stack.trace[i]));
67168d75effSDimitry Andric     if (s) return s;
67268d75effSDimitry Andric   }
67368d75effSDimitry Andric   return nullptr;
67468d75effSDimitry Andric }
67568d75effSDimitry Andric 
67668d75effSDimitry Andric ///// LeakReport implementation. /////
67768d75effSDimitry Andric 
67868d75effSDimitry Andric // A hard limit on the number of distinct leaks, to avoid quadratic complexity
67968d75effSDimitry Andric // in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
68068d75effSDimitry Andric // in real-world applications.
68168d75effSDimitry Andric // FIXME: Get rid of this limit by changing the implementation of LeakReport to
68268d75effSDimitry Andric // use a hash table.
68368d75effSDimitry Andric const uptr kMaxLeaksConsidered = 5000;
68468d75effSDimitry Andric 
68568d75effSDimitry Andric void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
68668d75effSDimitry Andric                                 uptr leaked_size, ChunkTag tag) {
68768d75effSDimitry Andric   CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
68868d75effSDimitry Andric   bool is_directly_leaked = (tag == kDirectlyLeaked);
68968d75effSDimitry Andric   uptr i;
69068d75effSDimitry Andric   for (i = 0; i < leaks_.size(); i++) {
69168d75effSDimitry Andric     if (leaks_[i].stack_trace_id == stack_trace_id &&
69268d75effSDimitry Andric         leaks_[i].is_directly_leaked == is_directly_leaked) {
69368d75effSDimitry Andric       leaks_[i].hit_count++;
69468d75effSDimitry Andric       leaks_[i].total_size += leaked_size;
69568d75effSDimitry Andric       break;
69668d75effSDimitry Andric     }
69768d75effSDimitry Andric   }
69868d75effSDimitry Andric   if (i == leaks_.size()) {
69968d75effSDimitry Andric     if (leaks_.size() == kMaxLeaksConsidered) return;
70068d75effSDimitry Andric     Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
70168d75effSDimitry Andric                   is_directly_leaked, /* is_suppressed */ false };
70268d75effSDimitry Andric     leaks_.push_back(leak);
70368d75effSDimitry Andric   }
70468d75effSDimitry Andric   if (flags()->report_objects) {
70568d75effSDimitry Andric     LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
70668d75effSDimitry Andric     leaked_objects_.push_back(obj);
70768d75effSDimitry Andric   }
70868d75effSDimitry Andric }
70968d75effSDimitry Andric 
71068d75effSDimitry Andric static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
71168d75effSDimitry Andric   if (leak1.is_directly_leaked == leak2.is_directly_leaked)
71268d75effSDimitry Andric     return leak1.total_size > leak2.total_size;
71368d75effSDimitry Andric   else
71468d75effSDimitry Andric     return leak1.is_directly_leaked;
71568d75effSDimitry Andric }
71668d75effSDimitry Andric 
71768d75effSDimitry Andric void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
71868d75effSDimitry Andric   CHECK(leaks_.size() <= kMaxLeaksConsidered);
71968d75effSDimitry Andric   Printf("\n");
72068d75effSDimitry Andric   if (leaks_.size() == kMaxLeaksConsidered)
72168d75effSDimitry Andric     Printf("Too many leaks! Only the first %zu leaks encountered will be "
72268d75effSDimitry Andric            "reported.\n",
72368d75effSDimitry Andric            kMaxLeaksConsidered);
72468d75effSDimitry Andric 
72568d75effSDimitry Andric   uptr unsuppressed_count = UnsuppressedLeakCount();
72668d75effSDimitry Andric   if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
72768d75effSDimitry Andric     Printf("The %zu top leak(s):\n", num_leaks_to_report);
72868d75effSDimitry Andric   Sort(leaks_.data(), leaks_.size(), &LeakComparator);
72968d75effSDimitry Andric   uptr leaks_reported = 0;
73068d75effSDimitry Andric   for (uptr i = 0; i < leaks_.size(); i++) {
73168d75effSDimitry Andric     if (leaks_[i].is_suppressed) continue;
73268d75effSDimitry Andric     PrintReportForLeak(i);
73368d75effSDimitry Andric     leaks_reported++;
73468d75effSDimitry Andric     if (leaks_reported == num_leaks_to_report) break;
73568d75effSDimitry Andric   }
73668d75effSDimitry Andric   if (leaks_reported < unsuppressed_count) {
73768d75effSDimitry Andric     uptr remaining = unsuppressed_count - leaks_reported;
73868d75effSDimitry Andric     Printf("Omitting %zu more leak(s).\n", remaining);
73968d75effSDimitry Andric   }
74068d75effSDimitry Andric }
74168d75effSDimitry Andric 
74268d75effSDimitry Andric void LeakReport::PrintReportForLeak(uptr index) {
74368d75effSDimitry Andric   Decorator d;
74468d75effSDimitry Andric   Printf("%s", d.Leak());
74568d75effSDimitry Andric   Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
74668d75effSDimitry Andric          leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
74768d75effSDimitry Andric          leaks_[index].total_size, leaks_[index].hit_count);
74868d75effSDimitry Andric   Printf("%s", d.Default());
74968d75effSDimitry Andric 
75068d75effSDimitry Andric   PrintStackTraceById(leaks_[index].stack_trace_id);
75168d75effSDimitry Andric 
75268d75effSDimitry Andric   if (flags()->report_objects) {
75368d75effSDimitry Andric     Printf("Objects leaked above:\n");
75468d75effSDimitry Andric     PrintLeakedObjectsForLeak(index);
75568d75effSDimitry Andric     Printf("\n");
75668d75effSDimitry Andric   }
75768d75effSDimitry Andric }
75868d75effSDimitry Andric 
75968d75effSDimitry Andric void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
76068d75effSDimitry Andric   u32 leak_id = leaks_[index].id;
76168d75effSDimitry Andric   for (uptr j = 0; j < leaked_objects_.size(); j++) {
76268d75effSDimitry Andric     if (leaked_objects_[j].leak_id == leak_id)
76368d75effSDimitry Andric       Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
76468d75effSDimitry Andric              leaked_objects_[j].size);
76568d75effSDimitry Andric   }
76668d75effSDimitry Andric }
76768d75effSDimitry Andric 
76868d75effSDimitry Andric void LeakReport::PrintSummary() {
76968d75effSDimitry Andric   CHECK(leaks_.size() <= kMaxLeaksConsidered);
77068d75effSDimitry Andric   uptr bytes = 0, allocations = 0;
77168d75effSDimitry Andric   for (uptr i = 0; i < leaks_.size(); i++) {
77268d75effSDimitry Andric       if (leaks_[i].is_suppressed) continue;
77368d75effSDimitry Andric       bytes += leaks_[i].total_size;
77468d75effSDimitry Andric       allocations += leaks_[i].hit_count;
77568d75effSDimitry Andric   }
77668d75effSDimitry Andric   InternalScopedString summary(kMaxSummaryLength);
77768d75effSDimitry Andric   summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
77868d75effSDimitry Andric                  allocations);
77968d75effSDimitry Andric   ReportErrorSummary(summary.data());
78068d75effSDimitry Andric }
78168d75effSDimitry Andric 
78268d75effSDimitry Andric void LeakReport::ApplySuppressions() {
78368d75effSDimitry Andric   for (uptr i = 0; i < leaks_.size(); i++) {
78468d75effSDimitry Andric     Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
78568d75effSDimitry Andric     if (s) {
78668d75effSDimitry Andric       s->weight += leaks_[i].total_size;
78768d75effSDimitry Andric       atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
78868d75effSDimitry Andric           leaks_[i].hit_count);
78968d75effSDimitry Andric       leaks_[i].is_suppressed = true;
79068d75effSDimitry Andric     }
79168d75effSDimitry Andric   }
79268d75effSDimitry Andric }
79368d75effSDimitry Andric 
79468d75effSDimitry Andric uptr LeakReport::UnsuppressedLeakCount() {
79568d75effSDimitry Andric   uptr result = 0;
79668d75effSDimitry Andric   for (uptr i = 0; i < leaks_.size(); i++)
79768d75effSDimitry Andric     if (!leaks_[i].is_suppressed) result++;
79868d75effSDimitry Andric   return result;
79968d75effSDimitry Andric }
80068d75effSDimitry Andric 
80168d75effSDimitry Andric } // namespace __lsan
80268d75effSDimitry Andric #else // CAN_SANITIZE_LEAKS
80368d75effSDimitry Andric namespace __lsan {
80468d75effSDimitry Andric void InitCommonLsan() { }
80568d75effSDimitry Andric void DoLeakCheck() { }
80668d75effSDimitry Andric void DoRecoverableLeakCheckVoid() { }
80768d75effSDimitry Andric void DisableInThisThread() { }
80868d75effSDimitry Andric void EnableInThisThread() { }
80968d75effSDimitry Andric }
81068d75effSDimitry Andric #endif // CAN_SANITIZE_LEAKS
81168d75effSDimitry Andric 
81268d75effSDimitry Andric using namespace __lsan;
81368d75effSDimitry Andric 
81468d75effSDimitry Andric extern "C" {
81568d75effSDimitry Andric SANITIZER_INTERFACE_ATTRIBUTE
81668d75effSDimitry Andric void __lsan_ignore_object(const void *p) {
81768d75effSDimitry Andric #if CAN_SANITIZE_LEAKS
81868d75effSDimitry Andric   if (!common_flags()->detect_leaks)
81968d75effSDimitry Andric     return;
82068d75effSDimitry Andric   // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
82168d75effSDimitry Andric   // locked.
82268d75effSDimitry Andric   BlockingMutexLock l(&global_mutex);
82368d75effSDimitry Andric   IgnoreObjectResult res = IgnoreObjectLocked(p);
82468d75effSDimitry Andric   if (res == kIgnoreObjectInvalid)
82568d75effSDimitry Andric     VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
82668d75effSDimitry Andric   if (res == kIgnoreObjectAlreadyIgnored)
82768d75effSDimitry Andric     VReport(1, "__lsan_ignore_object(): "
82868d75effSDimitry Andric            "heap object at %p is already being ignored\n", p);
82968d75effSDimitry Andric   if (res == kIgnoreObjectSuccess)
83068d75effSDimitry Andric     VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
83168d75effSDimitry Andric #endif // CAN_SANITIZE_LEAKS
83268d75effSDimitry Andric }
83368d75effSDimitry Andric 
83468d75effSDimitry Andric SANITIZER_INTERFACE_ATTRIBUTE
83568d75effSDimitry Andric void __lsan_register_root_region(const void *begin, uptr size) {
83668d75effSDimitry Andric #if CAN_SANITIZE_LEAKS
83768d75effSDimitry Andric   BlockingMutexLock l(&global_mutex);
83868d75effSDimitry Andric   CHECK(root_regions);
83968d75effSDimitry Andric   RootRegion region = {reinterpret_cast<uptr>(begin), size};
84068d75effSDimitry Andric   root_regions->push_back(region);
84168d75effSDimitry Andric   VReport(1, "Registered root region at %p of size %llu\n", begin, size);
84268d75effSDimitry Andric #endif // CAN_SANITIZE_LEAKS
84368d75effSDimitry Andric }
84468d75effSDimitry Andric 
84568d75effSDimitry Andric SANITIZER_INTERFACE_ATTRIBUTE
84668d75effSDimitry Andric void __lsan_unregister_root_region(const void *begin, uptr size) {
84768d75effSDimitry Andric #if CAN_SANITIZE_LEAKS
84868d75effSDimitry Andric   BlockingMutexLock l(&global_mutex);
84968d75effSDimitry Andric   CHECK(root_regions);
85068d75effSDimitry Andric   bool removed = false;
85168d75effSDimitry Andric   for (uptr i = 0; i < root_regions->size(); i++) {
85268d75effSDimitry Andric     RootRegion region = (*root_regions)[i];
85368d75effSDimitry Andric     if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
85468d75effSDimitry Andric       removed = true;
85568d75effSDimitry Andric       uptr last_index = root_regions->size() - 1;
85668d75effSDimitry Andric       (*root_regions)[i] = (*root_regions)[last_index];
85768d75effSDimitry Andric       root_regions->pop_back();
85868d75effSDimitry Andric       VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
85968d75effSDimitry Andric       break;
86068d75effSDimitry Andric     }
86168d75effSDimitry Andric   }
86268d75effSDimitry Andric   if (!removed) {
86368d75effSDimitry Andric     Report(
86468d75effSDimitry Andric         "__lsan_unregister_root_region(): region at %p of size %llu has not "
86568d75effSDimitry Andric         "been registered.\n",
86668d75effSDimitry Andric         begin, size);
86768d75effSDimitry Andric     Die();
86868d75effSDimitry Andric   }
86968d75effSDimitry Andric #endif // CAN_SANITIZE_LEAKS
87068d75effSDimitry Andric }
87168d75effSDimitry Andric 
87268d75effSDimitry Andric SANITIZER_INTERFACE_ATTRIBUTE
87368d75effSDimitry Andric void __lsan_disable() {
87468d75effSDimitry Andric #if CAN_SANITIZE_LEAKS
87568d75effSDimitry Andric   __lsan::DisableInThisThread();
87668d75effSDimitry Andric #endif
87768d75effSDimitry Andric }
87868d75effSDimitry Andric 
87968d75effSDimitry Andric SANITIZER_INTERFACE_ATTRIBUTE
88068d75effSDimitry Andric void __lsan_enable() {
88168d75effSDimitry Andric #if CAN_SANITIZE_LEAKS
88268d75effSDimitry Andric   __lsan::EnableInThisThread();
88368d75effSDimitry Andric #endif
88468d75effSDimitry Andric }
88568d75effSDimitry Andric 
88668d75effSDimitry Andric SANITIZER_INTERFACE_ATTRIBUTE
88768d75effSDimitry Andric void __lsan_do_leak_check() {
88868d75effSDimitry Andric #if CAN_SANITIZE_LEAKS
88968d75effSDimitry Andric   if (common_flags()->detect_leaks)
89068d75effSDimitry Andric     __lsan::DoLeakCheck();
89168d75effSDimitry Andric #endif // CAN_SANITIZE_LEAKS
89268d75effSDimitry Andric }
89368d75effSDimitry Andric 
89468d75effSDimitry Andric SANITIZER_INTERFACE_ATTRIBUTE
89568d75effSDimitry Andric int __lsan_do_recoverable_leak_check() {
89668d75effSDimitry Andric #if CAN_SANITIZE_LEAKS
89768d75effSDimitry Andric   if (common_flags()->detect_leaks)
89868d75effSDimitry Andric     return __lsan::DoRecoverableLeakCheck();
89968d75effSDimitry Andric #endif // CAN_SANITIZE_LEAKS
90068d75effSDimitry Andric   return 0;
90168d75effSDimitry Andric }
90268d75effSDimitry Andric 
90368d75effSDimitry Andric #if !SANITIZER_SUPPORTS_WEAK_HOOKS
90468d75effSDimitry Andric SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
90568d75effSDimitry Andric const char * __lsan_default_options() {
90668d75effSDimitry Andric   return "";
90768d75effSDimitry Andric }
90868d75effSDimitry Andric 
90968d75effSDimitry Andric SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
91068d75effSDimitry Andric int __lsan_is_turned_off() {
91168d75effSDimitry Andric   return 0;
91268d75effSDimitry Andric }
91368d75effSDimitry Andric 
91468d75effSDimitry Andric SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
91568d75effSDimitry Andric const char *__lsan_default_suppressions() {
91668d75effSDimitry Andric   return "";
91768d75effSDimitry Andric }
91868d75effSDimitry Andric #endif
91968d75effSDimitry Andric } // extern "C"
920