//=-- lsan_common.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS

# if SANITIZER_APPLE
// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L127
# if SANITIZER_IOS && !SANITIZER_IOSSIM
# define OBJC_DATA_MASK 0x0000007ffffffff8UL
# else
# define OBJC_DATA_MASK 0x00007ffffffffff8UL
# endif
// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L139
# define OBJC_FAST_IS_RW 0x8000000000000000UL
# endif

namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
Mutex global_mutex;

Flags lsan_flags;

void DisableCounterUnderflow() {
  if (common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
}

void Flags::SetDefaults() {
# define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
# include "lsan_flags.inc"
# undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
# define LSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
# include "lsan_flags.inc"
# undef LSAN_FLAG
}

# define LOG_POINTERS(...)     \
  do {                         \
    if (flags()->log_pointers) \
      Report(__VA_ARGS__);     \
  } while (0)

# define LOG_THREADS(...)     \
  do {                        \
    if (flags()->log_threads) \
      Report(__VA_ARGS__);    \
  } while (0)

class LeakSuppressionContext {
  bool parsed = false;
  SuppressionContext context;
  bool suppressed_stacks_sorted = true;
  InternalMmapVector<u32> suppressed_stacks;
  const LoadedModule *suppress_module = nullptr;

  void LazyInit();
  Suppression *GetSuppressionForAddr(uptr addr);
  bool SuppressInvalid(const StackTrace &stack);
  bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);

 public:
  LeakSuppressionContext(const char *suppression_types[],
                         int suppression_types_num)
      : context(suppression_types, suppression_types_num) {}

  bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size);

  const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
    if (!suppressed_stacks_sorted) {
      suppressed_stacks_sorted = true;
      SortAndDedup(suppressed_stacks);
    }
    return suppressed_stacks;
  }
  void PrintMatchedSuppressions();
};

ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
static LeakSuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = {kSuppressionLeak};
static const char kStdSuppressions[] =
# if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // definition.
    "leak:*pthread_exit*\n"
# endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
# if SANITIZER_APPLE
    // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
    "leak:*_os_trace*\n"
# endif
    // TLS leak in some glibc versions, described in
    // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
    "leak:*tls_get_addr*\n";

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder)
      LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
}

void LeakSuppressionContext::LazyInit() {
  if (!parsed) {
    parsed = true;
    context.ParseFromFile(flags()->suppressions);
    if (&__lsan_default_suppressions)
      context.Parse(__lsan_default_suppressions());
    context.Parse(kStdSuppressions);
    if (flags()->use_tls && flags()->use_ld_allocations)
      suppress_module = GetLinker();
  }
}

Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  const char *module_name = Symbolizer::GetOrInit()->GetModuleNameForPc(addr);
  if (!module_name)
    module_name = "<unknown module>";
  if (context.Match(module_name, kSuppressionLeak, &s))
    return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
        context.Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

static uptr GetCallerPC(const StackTrace &stack) {
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}

# if SANITIZER_APPLE
// Objective-C class data pointers are stored with flags in the low bits, so
// they need to be transformed back into something that looks like a pointer.
static inline void *MaybeTransformPointer(void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  if ((ptr & OBJC_FAST_IS_RW) == OBJC_FAST_IS_RW)
    ptr &= OBJC_DATA_MASK;
  return reinterpret_cast<void *>(ptr);
}
# endif

// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
// modules accounting etc.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
// On all other platforms, this simply checks to ensure that the caller pc is
// valid before reporting chunks as leaked.
bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
  uptr caller_pc = GetCallerPC(stack);
  // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
  // it as reachable, as we can't properly report its allocation stack anyway.
  return !caller_pc ||
         (suppress_module && suppress_module->containsAddress(caller_pc));
}

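// Checks every frame of the allocation stack against the "leak:" suppression
// rules; the first matching rule absorbs the leak's hit count and total size.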
bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
                                            uptr hit_count, uptr total_size) {
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) {
      s->weight += total_size;
      atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
                                      uptr total_size) {
  LazyInit();
  StackTrace stack = StackDepotGet(stack_trace_id);
  if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
    return false;
  suppressed_stacks_sorted = false;
  suppressed_stacks.push_back(stack_trace_id);
  return true;
}

static LeakSuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

static InternalMmapVectorNoCtor<RootRegion> root_regions;

InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions() {
  return &root_regions;
}

void InitCommonLsan() {
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() {}
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
};

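// Fast heuristic filter: rejects values that cannot possibly be pointers into
// the user heap (too low, or not a canonical user-space address on the given
// architecture).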
static inline bool MaybeUserPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress)
    return false;
# if defined(__x86_64__)
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
# elif defined(__mips64)
  return ((p >> 40) == 0);
# elif defined(__aarch64__)
  // Accept up to 48 bit VMA.
  return ((p >> 48) == 0);
# elif defined(__loongarch_lp64)
  // Allow 47-bit user-space VMA at current.
  return ((p >> 47) == 0);
# else
  return true;
# endif
}

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
               (void *)end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void **>(pp);
# if SANITIZER_APPLE
    p = MaybeTransformPointer(p);
# endif
    if (!MaybeUserPointer(reinterpret_cast<uptr>(p)))
      continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk)
      continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin)
      continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored)
      continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
          m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
                 (void *)pp, p, (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

// Scans a global range for pointers.
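// The allocator's own global objects are excluded from the scan so that its
// internal bookkeeping does not keep heap chunks artificially reachable.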
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
  uptr allocator_begin = 0, allocator_end = 0;
  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
  if (begin <= allocator_begin && allocator_begin < end) {
    CHECK_LE(allocator_begin, allocator_end);
    CHECK_LE(allocator_end, end);
    if (begin < allocator_begin)
      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                           kReachable);
    if (allocator_end < end)
      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
  } else {
    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
  }
}

void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
                          Frontier *frontier) {
  for (uptr i = 0; i < ranges.size(); i++) {
    ScanRangeForPointers(ranges[i].begin, ranges[i].end, frontier, "FAKE STACK",
                         kReachable);
  }
}

# if SANITIZER_FUCHSIA

// Fuchsia handles all threads together with its own callback.
static void ProcessThreads(SuspendedThreadsList const &, Frontier *, tid_t,
                           uptr) {}

# else

# if SANITIZER_ANDROID
// FIXME: Move this out into *libcdep.cpp
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
    pid_t, void (*cb)(void *, void *, uptr, void *), void *);
# endif

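// Treats pointers stashed in ThreadContext objects (collected via
// GetAdditionalThreadContextPtrsLocked) as reachable roots.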
static void ProcessThreadRegistry(Frontier *frontier) {
  InternalMmapVector<uptr> ptrs;
  GetAdditionalThreadContextPtrsLocked(&ptrs);

  for (uptr i = 0; i < ptrs.size(); ++i) {
    void *ptr = reinterpret_cast<void *>(ptrs[i]);
    uptr chunk = PointsIntoChunk(ptr);
    if (!chunk)
      continue;
    LsanMetadata m(chunk);
    if (!m.allocated())
      continue;

    // Mark as reachable and add to frontier.
    LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
    m.set_tag(kReachable);
    frontier->push_back(chunk);
  }
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier, tid_t caller_tid,
                           uptr caller_sp) {
  InternalMmapVector<uptr> registers;
  InternalMmapVector<Range> extra_ranges;
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %llu.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found =
        GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
                              &tls_end, &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %llu not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    PtraceRegistersStatus have_registers =
        suspended_threads.GetRegistersAndSP(i, &registers, &sp);
    if (have_registers != REGISTERS_AVAILABLE) {
      Report("Unable to get registers from thread %llu.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable unless
      // GetRegistersAndSP failed with ESRCH.
      if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
        continue;
      sp = stack_begin;
    }
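    // This is the thread that initiated the leak check. Use the stack pointer
    // captured before the world was stopped (see CheckForLeaks), so that stale
    // pointers left in the leak checker's own frames are not scanned.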
    if (suspended_threads.GetThreadID(i) == caller_tid) {
      sp = caller_sp;
    }

    if (flags()->use_registers && have_registers) {
      uptr registers_begin = reinterpret_cast<uptr>(registers.data());
      uptr registers_end =
          reinterpret_cast<uptr>(registers.data() + registers.size());
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);
    }

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
                  (void *)stack_end, (void *)sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, (void *)stack_begin, (void *)stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      extra_ranges.clear();
      GetThreadExtraStackRangesLocked(os_id, &extra_ranges);
      ScanExtraStackRanges(extra_ranges, frontier);
    }

    if (flags()->use_tls) {
      if (tls_begin) {
        LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
        // If the tls and cache ranges don't overlap, scan full tls range,
        // otherwise, only scan the non-overlapping portions
        if (cache_begin == cache_end || tls_end < cache_begin ||
            tls_begin > cache_end) {
          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
        } else {
          if (tls_begin < cache_begin)
            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                                 kReachable);
          if (tls_end > cache_end)
            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                                 kReachable);
        }
      }
# if SANITIZER_ANDROID
      auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_idd*/,
                     void *arg) -> void {
        ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
                             reinterpret_cast<uptr>(dtls_end),
                             reinterpret_cast<Frontier *>(arg), "DTLS",
                             kReachable);
      };

      // FIXME: There might be a race-condition here (and in Bionic) if the
      // thread is suspended in the middle of updating its DTLS. IOWs, we
      // could scan already freed memory. (probably fine for now)
      __libc_iterate_dynamic_tls(os_id, cb, frontier);
# else
      if (dtls && !DTLSInDestruction(dtls)) {
        ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
          uptr dtls_beg = dtv.beg;
          uptr dtls_end = dtls_beg + dtv.size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
                        (void *)dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        });
      } else {
        // We are handling a thread with DTLS under destruction. Log about
        // this and continue.
        LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
      }
# endif
    }
  }

  // Add pointers reachable from ThreadContexts
  ProcessThreadRegistry(frontier);
}

# endif  // SANITIZER_FUCHSIA

void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
                    uptr region_begin, uptr region_end, bool is_readable) {
  uptr intersection_begin = Max(root_region.begin, region_begin);
  uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
  if (intersection_begin >= intersection_end)
    return;
  LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
               (void *)root_region.begin,
               (void *)(root_region.begin + root_region.size),
               (void *)region_begin, (void *)region_end,
               is_readable ? "readable" : "unreadable");
  if (is_readable)
    ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
                         kReachable);
}

static void ProcessRootRegion(Frontier *frontier,
                              const RootRegion &root_region) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    ScanRootRegion(frontier, root_region, segment.start, segment.end,
                   segment.IsReadable());
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions)
    return;
  for (uptr i = 0; i < root_regions.size(); i++)
    ProcessRootRegion(frontier, root_regions[i]);
}

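// Worklist-driven flood fill: pops chunks off |frontier| and scans each one's
// contents; ScanRangeForPointers pushes any newly discovered chunks back onto
// the frontier until it is empty.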
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

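// ForEachChunk callback. Marks chunks whose allocation stack appears in the
// sorted list of previously suppressed stacks (passed via |arg|) as ignored.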
static void IgnoredSuppressedCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated() || m.tag() == kIgnored)
    return;

  const InternalMmapVector<u32> &suppressed =
      *static_cast<const InternalMmapVector<u32> *>(arg);
  uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
  if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
    return;

  LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
               (void *)(chunk + m.requested_size()), m.requested_size());
  m.set_tag(kIgnored);
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
                              Frontier *frontier, tid_t caller_tid,
                              uptr caller_sp) {
  const InternalMmapVector<u32> &suppressed_stacks =
      GetSuppressionContext()->GetSortedSuppressedStacks();
  if (!suppressed_stacks.empty()) {
    ForEachChunk(IgnoredSuppressedCb,
                 const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
  }
  ForEachChunk(CollectIgnoredCb, frontier);
  ProcessGlobalRegions(frontier);
  ProcessThreads(suspended_threads, frontier, caller_tid, caller_sp);
  ProcessRootRegions(frontier);
  FloodFillTag(frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(frontier);
  FloodFillTag(frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated())
    return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
    leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
}

void LeakSuppressionContext::PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched;
  context.GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++) {
    Printf("%7zu %10zu %s\n",
           static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
           matched[i]->weight, matched[i]->templ);
  }
  Printf("%s\n\n", line);
}

# if SANITIZER_FUCHSIA

// Fuchsia provides a libc interface that guarantees all threads are
// covered, and SuspendedThreadList is never really used.
static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}

# else  // !SANITIZER_FUCHSIA

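// Compares the sorted list of suspended thread IDs against every thread the
// registry reports as running and warns about any that could not be suspended,
// since pointers held only by such threads may be misreported as leaks.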
static void ReportUnsuspendedThreads(
    const SuspendedThreadsList &suspended_threads) {
  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
    threads[i] = suspended_threads.GetThreadID(i);

  Sort(threads.data(), threads.size());

  InternalMmapVector<tid_t> unsuspended;
  GetRunningThreadsLocked(&unsuspended);

  for (auto os_id : unsuspended) {
    uptr i = InternalLowerBound(threads, os_id);
    if (i >= threads.size() || threads[i] != os_id)
      Report(
          "Running thread %zu was not suspended. False leaks are possible.\n",
          os_id);
  }
}

# endif  // !SANITIZER_FUCHSIA

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ReportUnsuspendedThreads(suspended_threads);
  ClassifyAllChunks(suspended_threads, &param->frontier, param->caller_tid,
                    param->caller_sp);
  ForEachChunk(CollectLeaksCb, &param->leaks);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}

static bool PrintResults(LeakReport &report) {
  uptr unsuppressed_count = report.UnsuppressedLeakCount();
  if (unsuppressed_count) {
    Decorator d;
    Printf(
        "\n"
        "================================================================="
        "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.Default());
    report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    GetSuppressionContext()->PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    report.PrintSummary();
    return true;
  }
  return false;
}

static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off()) {
    VReport(1, "LeakSanitizer is disabled");
    return false;
  }
  VReport(1, "LeakSanitizer: checking for leaks");
  // Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
  // suppressions. However if a stack id was previously suppressed, it should be
  // suppressed in future checks as well.
  for (int i = 0;; ++i) {
    EnsureMainThreadIDIsCorrect();
    CheckForLeaksParam param;
    // Capture calling thread's stack pointer early, to avoid false negatives.
    // Old frame with dead pointers might be overlapped by new frame inside
    // CheckForLeaks which does not use bytes with pointers before the
    // threads are suspended and stack pointers captured.
    param.caller_tid = GetTid();
    param.caller_sp = reinterpret_cast<uptr>(__builtin_frame_address(0));
    LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
    if (!param.success) {
      Report("LeakSanitizer has encountered a fatal error.\n");
      Report(
          "HINT: For debugging, try setting environment variable "
          "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
      Report(
          "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
          "etc)\n");
      Die();
    }
    LeakReport leak_report;
    leak_report.AddLeakedChunks(param.leaks);

    // No newly suppressed stacks, so a rerun will not help and we can report.
    if (!leak_report.ApplySuppressions())
      return PrintResults(leak_report);

    // No indirect leaks to report, so we are done here.
    if (!leak_report.IndirectUnsuppressedLeakCount())
      return PrintResults(leak_report);

    if (i >= 8) {
      Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
      return PrintResults(leak_report);
    }

    // We found a new previously unseen suppressed call stack. Rerun to make
    // sure it does not hold indirect leaks.
    VReport(1, "Rerun with %zu suppressed stacks.",
            GetSuppressionContext()->GetSortedSuppressedStacks().size());
  }
}

static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }

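// The full leak check runs at most once per process; subsequent calls are
// no-ops. DoRecoverableLeakCheck below may be invoked repeatedly instead.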
void DoLeakCheck() {
  Lock l(&global_mutex);
  static bool already_done;
  if (already_done)
    return;
  already_done = true;
  has_reported_leaks = CheckForLeaks();
  if (has_reported_leaks)
    HandleLeaks();
}

static int DoRecoverableLeakCheck() {
  Lock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by moving logic into DedupLeaks.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
  for (const LeakedChunk &leak : chunks) {
    uptr chunk = leak.chunk;
    u32 stack_trace_id = leak.stack_trace_id;
    uptr leaked_size = leak.leaked_size;
    ChunkTag tag = leak.tag;
    CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);

    if (u32 resolution = flags()->resolution) {
      StackTrace stack = StackDepotGet(stack_trace_id);
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    }

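    // Leaks that share a stack trace and (in)direct status are merged into a
    // single entry; only the hit count and total size are accumulated.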
    bool is_directly_leaked = (tag == kDirectlyLeaked);
    uptr i;
    for (i = 0; i < leaks_.size(); i++) {
      if (leaks_[i].stack_trace_id == stack_trace_id &&
          leaks_[i].is_directly_leaked == is_directly_leaked) {
        leaks_[i].hit_count++;
        leaks_[i].total_size += leaked_size;
        break;
      }
    }
    if (i == leaks_.size()) {
      if (leaks_.size() == kMaxLeaksConsidered)
        return;
      Leak leak = {next_id_++,         /* hit_count */ 1,
                   leaked_size,        stack_trace_id,
                   is_directly_leaked, /* is_suppressed */ false};
      leaks_.push_back(leak);
    }
    if (flags()->report_objects) {
      LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
      leaked_objects_.push_back(obj);
    }
  }
}

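// Orders leaks for reporting: direct leaks come before indirect ones, and
// within each group larger total sizes are listed first.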
static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf(
        "Too many leaks! Only the first %zu leaks encountered will be "
        "reported.\n",
        kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report)
      break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.Default());

  CHECK(leaks_[index].stack_trace_id);
  StackDepotGet(leaks_[index].stack_trace_id).Print();

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary;
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}

uptr LeakReport::ApplySuppressions() {
  LeakSuppressionContext *suppressions = GetSuppressionContext();
  uptr new_suppressions = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
                               leaks_[i].total_size)) {
      leaks_[i].is_suppressed = true;
      ++new_suppressions;
    }
  }
  return new_suppressions;
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed)
      result++;
  return result;
}

uptr LeakReport::IndirectUnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
      result++;
  return result;
}

}  // namespace __lsan
#else   // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() {}
void DoLeakCheck() {}
void DoRecoverableLeakCheckVoid() {}
void DisableInThisThread() {}
void EnableInThisThread() {}
}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  Lock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1,
            "__lsan_ignore_object(): "
            "heap object at %p is already being ignored\n",
            p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  Lock l(&global_mutex);
  RootRegion region = {reinterpret_cast<uptr>(begin), size};
  root_regions.push_back(region);
  VReport(1, "Registered root region at %p of size %zu\n", begin, size);
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  Lock l(&global_mutex);
  bool removed = false;
  for (uptr i = 0; i < root_regions.size(); i++) {
    RootRegion region = root_regions[i];
    if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
      removed = true;
      uptr last_index = root_regions.size() - 1;
      root_regions[i] = root_regions[last_index];
      root_regions.pop_back();
      VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %zu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
  return 0;
}

SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
  return "";
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_WEAK_DEF(int, __lsan_is_turned_off, void) {
  return 0;
}

SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_suppressions, void) {
  return "";
}
#endif
}  // extern "C"