//=-- lsan_common.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject,
// and also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

Flags lsan_flags;

void DisableCounterUnderflow() {
  if (common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
}

void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "lsan_flags.inc"
#undef LSAN_FLAG
}
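// Flags are declared once in lsan_flags.inc and expanded here through the
// LSAN_FLAG X-macro above. For illustration, an entry in lsan_flags.inc has
// the shape (use_registers is a real flag consumed below; the description
// string is elided):
//
//   LSAN_FLAG(bool, use_registers, true, "...")
//
// so SetDefaults() expands it to "use_registers = true;" and
// RegisterLsanFlags() to a RegisterFlag() call for the name "use_registers".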
#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0)

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0)

ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };
static const char kStdSuppressions[] =
#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // definition.
    "leak:*pthread_exit*\n"
#endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
#if SANITIZER_MAC
    // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
    "leak:*_os_trace*\n"
#endif
    // TLS leak in some glibc versions, described in
    // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
    "leak:*tls_get_addr*\n";

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder)
      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
  suppression_ctx->ParseFromFile(flags()->suppressions);
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
  suppression_ctx->Parse(kStdSuppressions);
}

static SuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

static InternalMmapVector<RootRegion> *root_regions;

InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }

void InitializeRootRegions() {
  CHECK(!root_regions);
  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
  root_regions = new (placeholder) InternalMmapVector<RootRegion>();
}

const char *MaybeCallLsanDefaultOptions() {
  return (&__lsan_default_options) ? __lsan_default_options() : "";
}
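// For illustration, the suppressions parsed above can come from a file named
// in LSAN_OPTIONS (the file name and patterns below are hypothetical):
//
//   $ cat my.supp
//   leak:libfoo.so
//   leak:BarFunction
//   $ LSAN_OPTIONS=suppressions=my.supp ./a.out
//
// or be baked into the binary by overriding the weak hook consumed by
// InitializeSuppressions():
//
//   extern "C" const char *__lsan_default_suppressions() {
//     return "leak:libfoo.so\n";
//   }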
void InitCommonLsan() {
  InitializeRootRegions();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible
  // lower bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#if defined(__x86_64__)
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#elif defined(__mips64)
  return ((p >> 40) == 0);
#elif defined(__aarch64__)
  unsigned runtimeVMA =
      (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
  return ((p >> runtimeVMA) == 0);
#else
  return true;
#endif
}
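// Worked example for the x86_64 branch above: 0x00007fffffffe000 (a typical
// user-space stack address) has bits 47-63 clear, so (p >> 47) == 0 and the
// value is accepted; 0xffff800000000000 (the kernel half) gives
// (p >> 47) == 0x1ffff and is rejected before the more expensive chunk lookup.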
// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored) continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
                 chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

// Scans a global range for pointers.
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
  uptr allocator_begin = 0, allocator_end = 0;
  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
  if (begin <= allocator_begin && allocator_begin < end) {
    CHECK_LE(allocator_begin, allocator_end);
    CHECK_LE(allocator_end, end);
    if (begin < allocator_begin)
      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                           kReachable);
    if (allocator_end < end)
      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
  } else {
    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
  }
}

void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}
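// Illustrative walk-through of the scan above: with pointer_alignment() == 8,
// scanning the 32-byte range [0x1000, 0x1020) loads the words at 0x1000,
// 0x1008, 0x1010 and 0x1018 and treats each value as a candidate pointer.
// Because any word-sized value that happens to fall inside a live chunk keeps
// that chunk reachable, this conservative scan can miss leaks but never
// invents them.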
// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalMmapVector<uptr> registers(suspended_threads.RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end =
      reinterpret_cast<uptr>(registers.data() + registers.size());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in
      // the process of destruction. Log this event and move on.
      LOG_THREADS("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    PtraceRegistersStatus have_registers =
        suspended_threads.GetRegistersAndSP(i, registers.data(), &sp);
    if (have_registers != REGISTERS_AVAILABLE) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable unless
      // GetRegistersAndSP failed with ESRCH.
      if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running
        // a signal handler on alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, stack_begin, stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      if (tls_begin) {
        LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
        // If the TLS and cache ranges don't overlap, scan the full TLS range;
        // otherwise, scan only the non-overlapping portions.
        if (cache_begin == cache_end || tls_end < cache_begin ||
            tls_begin > cache_end) {
          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
        } else {
          if (tls_begin < cache_begin)
            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                                 kReachable);
          if (tls_end > cache_end)
            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                                 kReachable);
        }
      }
      if (dtls && !DTLSInDestruction(dtls)) {
        for (uptr j = 0; j < dtls->dtv_size; ++j) {
          uptr dtls_beg = dtls->dtv[j].beg;
          uptr dtls_end = dtls_beg + dtls->dtv[j].size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        }
      } else {
        // We are handling a thread with DTLS under destruction. Log about
        // this and continue.
        LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id);
      }
    }
  }
}

void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
                    uptr region_begin, uptr region_end, bool is_readable) {
  uptr intersection_begin = Max(root_region.begin, region_begin);
  uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
  if (intersection_begin >= intersection_end) return;
  LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
               root_region.begin, root_region.begin + root_region.size,
               region_begin, region_end,
               is_readable ? "readable" : "unreadable");
  if (is_readable)
    ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
                         kReachable);
}

static void ProcessRootRegion(Frontier *frontier,
                              const RootRegion &root_region) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    ScanRootRegion(frontier, root_region, segment.start, segment.end,
                   segment.IsReadable());
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  CHECK(root_regions);
  for (uptr i = 0; i < root_regions->size(); i++) {
    ProcessRootRegion(frontier, (*root_regions)[i]);
  }
}

static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}
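// Illustrative flood-fill trace: if a global points into chunk A and A's
// payload contains a pointer into chunk B, the root scan pushes A onto the
// frontier; FloodFillTag() then scans A, tags and pushes B, scans B, and
// stops once the frontier is empty. Chunks never reached this way keep their
// default kDirectlyLeaked tag.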
// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
                 chunk, chunk + m.requested_size(), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
  CHECK(stack_id);
  StackTrace stack = map->Get(stack_id);
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}

struct InvalidPCParam {
  Frontier *frontier;
  StackDepotReverseMap *stack_depot_reverse_map;
  bool skip_linker_allocations;
};

// ForEachChunk callback. If the caller pc is invalid or is within the linker,
// mark as reachable. Called by ProcessPlatformSpecificAllocations.
static void MarkInvalidPCCb(uptr chunk, void *arg) {
  CHECK(arg);
  InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
    u32 stack_id = m.stack_trace_id();
    uptr caller_pc = 0;
    if (stack_id > 0)
      caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
    // If caller_pc is unknown, this chunk may be allocated in a coroutine.
    // Mark it as reachable, as we can't properly report its allocation stack
    // anyway.
    if (caller_pc == 0 || (param->skip_linker_allocations &&
                           GetLinker()->containsAddress(caller_pc))) {
      m.set_tag(kReachable);
      param->frontier->push_back(chunk);
    }
  }
}

// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
// modules accounting etc.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by
// intercepting __libc_memalign(). On top of that, there is no easy way to
// reach them. Their addresses are stored in a dynamically allocated array
// (the DTV) which is referenced from the static TLS. Unfortunately, we can't
// just rely on the DTV being reachable from the static TLS, and the dynamic
// TLS being reachable from the DTV. This is because the initial DTV is
// allocated before our interception mechanism kicks in, and thus we don't
// recognize it as allocated memory. We can't special-case it either, since we
// don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other
// allocations which we don't care about).
// On all other platforms, this simply checks to ensure that the caller pc is
// valid before reporting chunks as leaked.
void ProcessPC(Frontier *frontier) {
  StackDepotReverseMap stack_depot_reverse_map;
  InvalidPCParam arg;
  arg.frontier = frontier;
  arg.stack_depot_reverse_map = &stack_depot_reverse_map;
  arg.skip_linker_allocations =
      flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
  ForEachChunk(MarkInvalidPCCb, &arg);
}
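// Note: the workaround above is flag-controlled; running with
// LSAN_OPTIONS=use_ld_allocations=0 disables it, which can surface the
// dynamic-TLS false positives described in the comment above.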
// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier;

  ForEachChunk(CollectIgnoredCb, &frontier);
  ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  ProcessRootRegions(&frontier);
  FloodFillTag(&frontier, kReachable);

  CHECK_EQ(0, frontier.size());
  ProcessPC(&frontier);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  StackDepotGet(stack_trace_id).Print();
}
// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    u32 resolution = flags()->resolution;
    u32 stack_trace_id = 0;
    if (resolution > 0) {
      StackTrace stack = StackDepotGet(m.stack_trace_id());
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    } else {
      stack_trace_id = m.stack_trace_id();
    }
    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
                                m.tag());
  }
}

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched;
  GetSuppressionContext()->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
        &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

struct CheckForLeaksParam {
  bool success;
  LeakReport leak_report;
};

static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
  const InternalMmapVector<tid_t> &suspended_threads =
      *(const InternalMmapVector<tid_t> *)arg;
  if (tctx->status == ThreadStatusRunning) {
    uptr i = InternalLowerBound(suspended_threads, 0, suspended_threads.size(),
                                tctx->os_id, CompareLess<int>());
    if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
      Report("Running thread %d was not suspended. False leaks are possible.\n",
             tctx->os_id);
  }
}
static void ReportUnsuspendedThreads(
    const SuspendedThreadsList &suspended_threads) {
  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
    threads[i] = suspended_threads.GetThreadID(i);

  Sort(threads.data(), threads.size());

  GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      &ReportIfNotSuspended, &threads);
}

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ReportUnsuspendedThreads(suspended_threads);
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}

static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return false;
  EnsureMainThreadIDIsCorrect();
  CheckForLeaksParam param;
  param.success = false;
  LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Report(
        "HINT: For debugging, try setting environment variable "
        "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
    Report(
        "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
    Die();
  }
  param.leak_report.ApplySuppressions();
  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
  if (unsuppressed_count > 0) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.Default());
    param.leak_report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    param.leak_report.PrintSummary();
    return true;
  }
  return false;
}

static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }

void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  has_reported_leaks = CheckForLeaks();
  if (has_reported_leaks) HandleLeaks();
}

static int DoRecoverableLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }

static Suppression *GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  SuppressionContext *suppressions = GetSuppressionContext();
  if (const char *module_name =
          Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
    if (suppressions->Match(module_name, kSuppressionLeak, &s))
      return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
        suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  StackTrace stack = StackDepotGet(stack_trace_id);
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) return s;
  }
  return nullptr;
}
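// For illustration, in addition to the usual atexit() check a long-running
// client can poll for leaks without dying, via the interface declared in
// <sanitizer/lsan_interface.h> (wired to DoRecoverableLeakCheck() at the
// bottom of this file):
//
//   if (__lsan_do_recoverable_leak_check())
//     fprintf(stderr, "leaks detected at this checkpoint\n");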
///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  uptr i;
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}
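// Illustrative: if 100 allocations leak from the same (possibly
// resolution-truncated) call stack, AddLeakedChunk() folds them into a single
// Leak record with hit_count == 100 and total_size equal to the sum of the
// requested sizes; LeakComparator then orders records with direct leaks
// first, and by descending total_size within each group.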
void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.Default());

  PrintStackTraceById(leaks_[index].stack_trace_id);

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary(kMaxSummaryLength);
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}
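// Taken together, the routines above produce a report of this shape (the
// addresses and frames are illustrative):
//
//   Direct leak of 1024 byte(s) in 1 object(s) allocated from:
//     #0 0x4b8d2b in malloc
//     #1 0x4f1a2c in main
//
//   SUMMARY: LeakSanitizer: 1024 byte(s) leaked in 1 allocation(s).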
void LeakReport::ApplySuppressions() {
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
                                              leaks_[i].hit_count);
      leaks_[i].is_suppressed = true;
    }
  }
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
  return result;
}

} // namespace __lsan
#else // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() { }
void DoLeakCheck() { }
void DoRecoverableLeakCheckVoid() { }
void DisableInThisThread() { }
void EnableInThisThread() { }
}
#endif // CAN_SANITIZE_LEAKS

using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is
  // not locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
           "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif // CAN_SANITIZE_LEAKS
}
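// For illustration, a client excludes an intentionally-kept object like so
// (the names are hypothetical); the object, and anything reachable from it,
// is then treated as live:
//
//   void *cache = malloc(4096);  // deliberately never freed
//   __lsan_ignore_object(cache);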
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  RootRegion region = {reinterpret_cast<uptr>(begin), size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %llu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  bool removed = false;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
      removed = true;
      uptr last_index = root_regions->size() - 1;
      (*root_regions)[i] = (*root_regions)[last_index];
      root_regions->pop_back();
      VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %llu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}
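// For illustration, the pair above brackets allocations that should never be
// reported (the variable name is hypothetical); C++ clients can instead use
// the RAII wrapper __lsan::ScopedDisabler from <sanitizer/lsan_interface.h>:
//
//   __lsan_disable();
//   leaked_on_purpose = malloc(32);
//   __lsan_enable();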
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif // CAN_SANITIZE_LEAKS
  return 0;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char * __lsan_default_options() {
  return "";
}

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions() {
  return "";
}
#endif
} // extern "C"