xref: /llvm-project/compiler-rt/lib/lsan/lsan_common.cpp (revision ac38ab594f1b7c9a9f67007f7684e4d46b2588be)
1ae1fc9baSNico Weber //=-- lsan_common.cpp -----------------------------------------------------===//
2ae1fc9baSNico Weber //
3ae1fc9baSNico Weber // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4ae1fc9baSNico Weber // See https://llvm.org/LICENSE.txt for license information.
5ae1fc9baSNico Weber // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6ae1fc9baSNico Weber //
7ae1fc9baSNico Weber //===----------------------------------------------------------------------===//
8ae1fc9baSNico Weber //
9ae1fc9baSNico Weber // This file is a part of LeakSanitizer.
10ae1fc9baSNico Weber // Implementation of common leak checking functionality.
11ae1fc9baSNico Weber //
12ae1fc9baSNico Weber //===----------------------------------------------------------------------===//
13ae1fc9baSNico Weber 
14ae1fc9baSNico Weber #include "lsan_common.h"
15ae1fc9baSNico Weber 
16ae1fc9baSNico Weber #include "sanitizer_common/sanitizer_common.h"
17ae1fc9baSNico Weber #include "sanitizer_common/sanitizer_flag_parser.h"
18ae1fc9baSNico Weber #include "sanitizer_common/sanitizer_flags.h"
19ae1fc9baSNico Weber #include "sanitizer_common/sanitizer_placement_new.h"
20ae1fc9baSNico Weber #include "sanitizer_common/sanitizer_procmaps.h"
21ae1fc9baSNico Weber #include "sanitizer_common/sanitizer_report_decorator.h"
22ae1fc9baSNico Weber #include "sanitizer_common/sanitizer_stackdepot.h"
23ae1fc9baSNico Weber #include "sanitizer_common/sanitizer_stacktrace.h"
24ae1fc9baSNico Weber #include "sanitizer_common/sanitizer_suppressions.h"
25ae1fc9baSNico Weber #include "sanitizer_common/sanitizer_thread_registry.h"
26ae1fc9baSNico Weber #include "sanitizer_common/sanitizer_tls_get_addr.h"
27ae1fc9baSNico Weber 
28ae1fc9baSNico Weber #if CAN_SANITIZE_LEAKS
29f458d9f6SLeonard Grey 
30f458d9f6SLeonard Grey #  if SANITIZER_APPLE
31f458d9f6SLeonard Grey // https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L127
32f458d9f6SLeonard Grey #    if SANITIZER_IOS && !SANITIZER_IOSSIM
33f458d9f6SLeonard Grey #      define OBJC_DATA_MASK 0x0000007ffffffff8UL
34f458d9f6SLeonard Grey #    else
35f458d9f6SLeonard Grey #      define OBJC_DATA_MASK 0x00007ffffffffff8UL
36f458d9f6SLeonard Grey #    endif
37f458d9f6SLeonard Grey #  endif
38f458d9f6SLeonard Grey 
39ae1fc9baSNico Weber namespace __lsan {
40ae1fc9baSNico Weber 
// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
static Mutex global_mutex;

// Wrappers that let code outside this translation unit acquire/release
// global_mutex (annotated for thread-safety analysis).
void LockGlobal() SANITIZER_ACQUIRE(global_mutex) { global_mutex.Lock(); }
void UnlockGlobal() SANITIZER_RELEASE(global_mutex) { global_mutex.Unlock(); }

// Runtime flags for LSan; populated via RegisterLsanFlags()/SetDefaults().
Flags lsan_flags;
49ae1fc9baSNico Weber 
50ae1fc9baSNico Weber void DisableCounterUnderflow() {
51ae1fc9baSNico Weber   if (common_flags()->detect_leaks) {
52ae1fc9baSNico Weber     Report("Unmatched call to __lsan_enable().\n");
53ae1fc9baSNico Weber     Die();
54ae1fc9baSNico Weber   }
55ae1fc9baSNico Weber }
56ae1fc9baSNico Weber 
// Resets every LSan flag to its default value. The flag list is generated by
// expanding the LSAN_FLAG X-macro over lsan_flags.inc.
void Flags::SetDefaults() {
#  define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#  include "lsan_flags.inc"
#  undef LSAN_FLAG
}
62ae1fc9baSNico Weber 
// Registers every LSan flag from lsan_flags.inc with `parser`, so the fields
// of `f` can be set from the options string / environment.
void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#  define LSAN_FLAG(Type, Name, DefaultValue, Description) \
    RegisterFlag(parser, #Name, Description, &f->Name);
#  include "lsan_flags.inc"
#  undef LSAN_FLAG
}
69ae1fc9baSNico Weber 
// Verbose-logging helpers, each gated on the corresponding runtime flag so
// the Report() call is skipped entirely in the common (quiet) case.
#  define LOG_POINTERS(...)      \
    do {                         \
      if (flags()->log_pointers) \
        Report(__VA_ARGS__);     \
    } while (0)

#  define LOG_THREADS(...)      \
    do {                        \
      if (flags()->log_threads) \
        Report(__VA_ARGS__);    \
    } while (0)
81ae1fc9baSNico Weber 
829a5261efSVitaly Buka class LeakSuppressionContext {
839a5261efSVitaly Buka   bool parsed = false;
849a5261efSVitaly Buka   SuppressionContext context;
859a023701SVitaly Buka   bool suppressed_stacks_sorted = true;
869a023701SVitaly Buka   InternalMmapVector<u32> suppressed_stacks;
87f86deb18SVitaly Buka   const LoadedModule *suppress_module = nullptr;
889a5261efSVitaly Buka 
899a023701SVitaly Buka   void LazyInit();
90f86deb18SVitaly Buka   Suppression *GetSuppressionForAddr(uptr addr);
91f86deb18SVitaly Buka   bool SuppressInvalid(const StackTrace &stack);
92a9a14990SVitaly Buka   bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);
939a5261efSVitaly Buka 
949a5261efSVitaly Buka  public:
959a5261efSVitaly Buka   LeakSuppressionContext(const char *supprression_types[],
969a5261efSVitaly Buka                          int suppression_types_num)
979a5261efSVitaly Buka       : context(supprression_types, suppression_types_num) {}
989a5261efSVitaly Buka 
99a9a14990SVitaly Buka   bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size);
1009a023701SVitaly Buka 
1019a023701SVitaly Buka   const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
1029a023701SVitaly Buka     if (!suppressed_stacks_sorted) {
1039a023701SVitaly Buka       suppressed_stacks_sorted = true;
1049a023701SVitaly Buka       SortAndDedup(suppressed_stacks);
1059a023701SVitaly Buka     }
1069a023701SVitaly Buka     return suppressed_stacks;
1079a023701SVitaly Buka   }
1089a5261efSVitaly Buka   void PrintMatchedSuppressions();
1099a5261efSVitaly Buka };
1109a5261efSVitaly Buka 
// Static storage for the global LeakSuppressionContext; the object is
// placement-new'ed into this buffer by InitializeSuppressions().
alignas(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
static LeakSuppressionContext *suppression_ctx = nullptr;
// The only suppression category LSan understands.
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = {kSuppressionLeak};
// Built-in suppressions that are always applied in addition to user rules.
static const char kStdSuppressions[] =
#  if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // definition.
    "leak:*pthread_exit*\n"
#  endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
#  if SANITIZER_APPLE
    // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
    "leak:*_os_trace*\n"
#  endif
    // TLS leak in some glibc versions, described in
    // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
    "leak:*tls_get_addr*\n";
128ae1fc9baSNico Weber 
// Constructs the global suppression context in static storage. Must be called
// at most once; the CHECK guards against double initialization.
void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder)
      LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
}
134ae1fc9baSNico Weber 
1359a023701SVitaly Buka void LeakSuppressionContext::LazyInit() {
1369a5261efSVitaly Buka   if (!parsed) {
1379a5261efSVitaly Buka     parsed = true;
1389a5261efSVitaly Buka     context.ParseFromFile(flags()->suppressions);
1399a5261efSVitaly Buka     if (&__lsan_default_suppressions)
1409a5261efSVitaly Buka       context.Parse(__lsan_default_suppressions());
1419a5261efSVitaly Buka     context.Parse(kStdSuppressions);
142f86deb18SVitaly Buka     if (flags()->use_tls && flags()->use_ld_allocations)
143f86deb18SVitaly Buka       suppress_module = GetLinker();
1449a5261efSVitaly Buka   }
1459a5261efSVitaly Buka }
1469a5261efSVitaly Buka 
// Finds a suppression rule matching `addr`: first by module name, then by the
// function/file names produced by the symbolizer. Returns null if nothing
// matches.
Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  const char *module_name = Symbolizer::GetOrInit()->GetModuleNameForPc(addr);
  if (!module_name)
    module_name = "<unknown module>";
  if (context.Match(module_name, kSuppressionLeak, &s))
    return s;

  // Suppress by file or function name.
  // One PC may symbolize to several frames (inlining); check each of them.
  SymbolizedStackHolder symbolized_stack(
      Symbolizer::GetOrInit()->SymbolizePC(addr));
  const SymbolizedStack *frames = symbolized_stack.get();
  for (const SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
        context.Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  return s;
}
169dd1abb11SVitaly Buka 
170f86deb18SVitaly Buka static uptr GetCallerPC(const StackTrace &stack) {
171f86deb18SVitaly Buka   // The top frame is our malloc/calloc/etc. The next frame is the caller.
172f86deb18SVitaly Buka   if (stack.size >= 2)
173f86deb18SVitaly Buka     return stack.trace[1];
174f86deb18SVitaly Buka   return 0;
175f86deb18SVitaly Buka }
176f86deb18SVitaly Buka 
#  if SANITIZER_APPLE
// Several pointers in the Objective-C runtime (method cache and class_rw_t,
// for example) are tagged with additional bits we need to strip.
// OBJC_DATA_MASK keeps only the address bits (mask value differs between
// iOS devices and other Apple targets; see the #define above).
static inline void *TransformPointer(void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return reinterpret_cast<void *>(ptr & OBJC_DATA_MASK);
}
#  endif
185f458d9f6SLeonard Grey 
// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
// modules accounting etc.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
// On all other platforms, this simply checks to ensure that the caller pc is
// valid before reporting chunks as leaked.
// Returns true when the chunk should be suppressed without consulting rules.
bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
  uptr caller_pc = GetCallerPC(stack);
  // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
  // it as reachable, as we can't properly report its allocation stack anyway.
  return !caller_pc ||
         (suppress_module && suppress_module->containsAddress(caller_pc));
}
213f86deb18SVitaly Buka 
214a9a14990SVitaly Buka bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
215a9a14990SVitaly Buka                                             uptr hit_count, uptr total_size) {
216dd1abb11SVitaly Buka   for (uptr i = 0; i < stack.size; i++) {
217dd1abb11SVitaly Buka     Suppression *s = GetSuppressionForAddr(
218dd1abb11SVitaly Buka         StackTrace::GetPreviousInstructionPc(stack.trace[i]));
219dd1abb11SVitaly Buka     if (s) {
220a9a14990SVitaly Buka       s->weight += total_size;
221a9a14990SVitaly Buka       atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
222a9a14990SVitaly Buka       return true;
223a9a14990SVitaly Buka     }
224a9a14990SVitaly Buka   }
225a9a14990SVitaly Buka   return false;
226a9a14990SVitaly Buka }
227a9a14990SVitaly Buka 
228a9a14990SVitaly Buka bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
229a9a14990SVitaly Buka                                       uptr total_size) {
230a9a14990SVitaly Buka   LazyInit();
231a9a14990SVitaly Buka   StackTrace stack = StackDepotGet(stack_trace_id);
232f86deb18SVitaly Buka   if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
233a9a14990SVitaly Buka     return false;
234dd1abb11SVitaly Buka   suppressed_stacks_sorted = false;
235dd1abb11SVitaly Buka   suppressed_stacks.push_back(stack_trace_id);
236a9a14990SVitaly Buka   return true;
237dd1abb11SVitaly Buka }
238dd1abb11SVitaly Buka 
// Returns the global suppression context; InitializeSuppressions() must have
// been called first (CHECKed).
static LeakSuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}
243ae1fc9baSNico Weber 
244ae1fc9baSNico Weber void InitCommonLsan() {
245ae1fc9baSNico Weber   if (common_flags()->detect_leaks) {
246ae1fc9baSNico Weber     // Initialization which can fail or print warnings should only be done if
247ae1fc9baSNico Weber     // LSan is actually enabled.
248ae1fc9baSNico Weber     InitializeSuppressions();
249ae1fc9baSNico Weber     InitializePlatformSpecificModules();
250ae1fc9baSNico Weber   }
251ae1fc9baSNico Weber }
252ae1fc9baSNico Weber 
// Maps leak-report elements to terminal color escapes (empty strings when
// color output is disabled by the base decorator).
class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() {}
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
};
259ae1fc9baSNico Weber 
// Heuristic pre-filter for the pointer scan: returns true if `p` could
// plausibly be a pointer into the user heap. Rejects very low addresses and,
// per architecture, values whose high bits cannot occur in a user-space
// address (accounting for hardware pointer-tagging schemes).
static inline bool MaybeUserPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress)
    return false;
#  if defined(__x86_64__)
  // TODO: support LAM48 and 5 level page tables.
  // LAM_U57 mask format
  //  * top byte: 0x81 because the format is: [0] [6-bit tag] [0]
  //  * top-1 byte: 0xff because it should be 0
  //  * top-2 byte: 0x80 because Linux uses 128 TB VMA ending at 0x7fffffffffff
  constexpr uptr kLAM_U57Mask = 0x81ff80;
  constexpr uptr kPointerMask = kLAM_U57Mask << 40;
  return ((p & kPointerMask) == 0);
#  elif defined(__mips64)
  return ((p >> 40) == 0);
#  elif defined(__aarch64__)
  // TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in
  // address translation and can be used to store a tag.
  constexpr uptr kPointerMask = 255ULL << 48;
  // Accept up to 48 bit VMA.
  return ((p & kPointerMask) == 0);
#  elif defined(__loongarch_lp64)
  // Allow 47-bit user-space VMA at current.
  return ((p >> 47) == 0);
#  else
  // Unknown architecture: accept everything above kMinAddress.
  return true;
#  endif
}
290ae1fc9baSNico Weber 
291d60fdc1cSVitaly Buka namespace {
292d60fdc1cSVitaly Buka struct DirectMemoryAccessor {
293d60fdc1cSVitaly Buka   void Init(uptr begin, uptr end) {};
294d60fdc1cSVitaly Buka   void *LoadPtr(uptr p) const { return *reinterpret_cast<void **>(p); }
295d60fdc1cSVitaly Buka };
296f4c60883SVitaly Buka 
297f4c60883SVitaly Buka struct CopyMemoryAccessor {
298f4c60883SVitaly Buka   void Init(uptr begin, uptr end) {
299f4c60883SVitaly Buka     this->begin = begin;
300f4c60883SVitaly Buka     buffer.clear();
301f4c60883SVitaly Buka     buffer.resize(end - begin);
302f4c60883SVitaly Buka     MemCpyAccessible(buffer.data(), reinterpret_cast<void *>(begin),
303f4c60883SVitaly Buka                      buffer.size());
304f4c60883SVitaly Buka   };
305f4c60883SVitaly Buka 
306f4c60883SVitaly Buka   void *LoadPtr(uptr p) const {
307f4c60883SVitaly Buka     uptr offset = p - begin;
308f4c60883SVitaly Buka     CHECK_LE(offset + sizeof(void *), reinterpret_cast<uptr>(buffer.size()));
309f4c60883SVitaly Buka     return *reinterpret_cast<void **>(offset +
310f4c60883SVitaly Buka                                       reinterpret_cast<uptr>(buffer.data()));
311f4c60883SVitaly Buka   }
312f4c60883SVitaly Buka 
313f4c60883SVitaly Buka  private:
314f4c60883SVitaly Buka   uptr begin;
315f4c60883SVitaly Buka   InternalMmapVector<char> buffer;
316f4c60883SVitaly Buka };
317d60fdc1cSVitaly Buka }  // namespace
318d60fdc1cSVitaly Buka 
// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
// |accessor| abstracts how words are loaded (directly or from a copy).
template <class Accessor>
void ScanForPointers(uptr begin, uptr end, Frontier *frontier,
                     const char *region_type, ChunkTag tag,
                     Accessor &accessor) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
               (void *)end);
  accessor.Init(begin, end);
  uptr pp = begin;
  // Round the cursor up to the configured pointer alignment.
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {
    void *p = accessor.LoadPtr(pp);
#  if SANITIZER_APPLE
    // Strip Objective-C runtime tag bits before treating it as an address.
    p = TransformPointer(p);
#  endif
    if (!MaybeUserPointer(reinterpret_cast<uptr>(p)))
      continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk)
      continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin)
      continue;
    LsanMetadata m(chunk);
    // Skip chunks already known reachable or deliberately ignored.
    if (m.tag() == kReachable || m.tag() == kIgnored)
      continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
          m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
                 (void *)pp, p, (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}
372ae1fc9baSNico Weber 
373d60fdc1cSVitaly Buka void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
374d60fdc1cSVitaly Buka                           const char *region_type, ChunkTag tag) {
375d60fdc1cSVitaly Buka   DirectMemoryAccessor accessor;
376d60fdc1cSVitaly Buka   ScanForPointers(begin, end, frontier, region_type, tag, accessor);
377d60fdc1cSVitaly Buka }
378d60fdc1cSVitaly Buka 
379ae1fc9baSNico Weber // Scans a global range for pointers
380ae1fc9baSNico Weber void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
381ae1fc9baSNico Weber   uptr allocator_begin = 0, allocator_end = 0;
382ae1fc9baSNico Weber   GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
383ae1fc9baSNico Weber   if (begin <= allocator_begin && allocator_begin < end) {
384ae1fc9baSNico Weber     CHECK_LE(allocator_begin, allocator_end);
385ae1fc9baSNico Weber     CHECK_LE(allocator_end, end);
386ae1fc9baSNico Weber     if (begin < allocator_begin)
387ae1fc9baSNico Weber       ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
388ae1fc9baSNico Weber                            kReachable);
389ae1fc9baSNico Weber     if (allocator_end < end)
390ae1fc9baSNico Weber       ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
391ae1fc9baSNico Weber   } else {
392ae1fc9baSNico Weber     ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
393ae1fc9baSNico Weber   }
394ae1fc9baSNico Weber }
395ae1fc9baSNico Weber 
396d60fdc1cSVitaly Buka template <class Accessor>
3970138adb6SVitaly Buka void ScanRanges(const InternalMmapVector<Range> &ranges, Frontier *frontier,
3980138adb6SVitaly Buka                 const char *region_type, Accessor &accessor) {
399d60fdc1cSVitaly Buka   for (uptr i = 0; i < ranges.size(); i++) {
4000138adb6SVitaly Buka     ScanForPointers(ranges[i].begin, ranges[i].end, frontier, region_type,
401d60fdc1cSVitaly Buka                     kReachable, accessor);
402d60fdc1cSVitaly Buka   }
403d60fdc1cSVitaly Buka }
404d60fdc1cSVitaly Buka 
405af210ee5SKirill Stoimenov void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
406af210ee5SKirill Stoimenov                           Frontier *frontier) {
407d60fdc1cSVitaly Buka   DirectMemoryAccessor accessor;
4080138adb6SVitaly Buka   ScanRanges(ranges, frontier, "FAKE STACK", accessor);
409ae1fc9baSNico Weber }
410ae1fc9baSNico Weber 
#  if SANITIZER_FUCHSIA

// Fuchsia handles all threads together with its own callback.
// No-op stub so the generic leak-check code compiles unchanged.
static void ProcessThreads(SuspendedThreadsList const &, Frontier *, tid_t,
                           uptr) {}

#  else

#    if SANITIZER_ANDROID
// FIXME: Move this out into *libcdep.cpp
// Weak Bionic hook: invokes `cb` with the bounds of each dynamic-TLS block of
// the given thread (weak, so it may be absent on older Android releases).
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
    pid_t, void (*cb)(void *, void *, uptr, void *), void *);
#    endif
4246c164d60SVitaly Buka 
425dd922bc2SDan Liew static void ProcessThreadRegistry(Frontier *frontier) {
426dd922bc2SDan Liew   InternalMmapVector<uptr> ptrs;
427c184423eSKirill Stoimenov   GetAdditionalThreadContextPtrsLocked(&ptrs);
428dd922bc2SDan Liew 
429dd922bc2SDan Liew   for (uptr i = 0; i < ptrs.size(); ++i) {
430dd922bc2SDan Liew     void *ptr = reinterpret_cast<void *>(ptrs[i]);
431dd922bc2SDan Liew     uptr chunk = PointsIntoChunk(ptr);
432dd922bc2SDan Liew     if (!chunk)
433dd922bc2SDan Liew       continue;
434dd922bc2SDan Liew     LsanMetadata m(chunk);
435dd922bc2SDan Liew     if (!m.allocated())
436dd922bc2SDan Liew       continue;
437dd922bc2SDan Liew 
438dd922bc2SDan Liew     // Mark as reachable and add to frontier.
439dd922bc2SDan Liew     LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
440dd922bc2SDan Liew     m.set_tag(kReachable);
441dd922bc2SDan Liew     frontier->push_back(chunk);
442dd922bc2SDan Liew   }
443dd922bc2SDan Liew }
444dd922bc2SDan Liew 
445ae1fc9baSNico Weber // Scans thread data (stacks and TLS) for heap pointers.
446d60fdc1cSVitaly Buka template <class Accessor>
4473142dff7SVitaly Buka static void ProcessThread(tid_t os_id, uptr sp,
4483142dff7SVitaly Buka                           const InternalMmapVector<uptr> &registers,
4493142dff7SVitaly Buka                           InternalMmapVector<Range> &extra_ranges,
450d60fdc1cSVitaly Buka                           Frontier *frontier, Accessor &accessor) {
4513142dff7SVitaly Buka   // `extra_ranges` is outside of the function and the loop to reused mapped
4523142dff7SVitaly Buka   // memory.
4533142dff7SVitaly Buka   CHECK(extra_ranges.empty());
454629b40daSMartin Liska   LOG_THREADS("Processing thread %llu.\n", os_id);
455ae1fc9baSNico Weber   uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
456ae1fc9baSNico Weber   DTLS *dtls;
457b79ea567SVitaly Buka   bool thread_found =
458b79ea567SVitaly Buka       GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
459b79ea567SVitaly Buka                             &tls_end, &cache_begin, &cache_end, &dtls);
460ae1fc9baSNico Weber   if (!thread_found) {
461ae1fc9baSNico Weber     // If a thread can't be found in the thread registry, it's probably in the
462ae1fc9baSNico Weber     // process of destruction. Log this event and move on.
463629b40daSMartin Liska     LOG_THREADS("Thread %llu not found in registry.\n", os_id);
4643142dff7SVitaly Buka     return;
465ae1fc9baSNico Weber   }
4666ffd3bbcSVitaly Buka 
4676ffd3bbcSVitaly Buka   if (!sp)
4686ffd3bbcSVitaly Buka     sp = stack_begin;
4696ffd3bbcSVitaly Buka 
4703142dff7SVitaly Buka   if (flags()->use_registers) {
471cd13476aSVitaly Buka     uptr registers_begin = reinterpret_cast<uptr>(registers.data());
472cd13476aSVitaly Buka     uptr registers_end =
473cd13476aSVitaly Buka         reinterpret_cast<uptr>(registers.data() + registers.size());
474d60fdc1cSVitaly Buka     ScanForPointers(registers_begin, registers_end, frontier, "REGISTERS",
475d60fdc1cSVitaly Buka                     kReachable, accessor);
476cd13476aSVitaly Buka   }
477ae1fc9baSNico Weber 
478ae1fc9baSNico Weber   if (flags()->use_stacks) {
479629b40daSMartin Liska     LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
480629b40daSMartin Liska                 (void *)stack_end, (void *)sp);
481ae1fc9baSNico Weber     if (sp < stack_begin || sp >= stack_end) {
482ae1fc9baSNico Weber       // SP is outside the recorded stack range (e.g. the thread is running a
483ae1fc9baSNico Weber       // signal handler on alternate stack, or swapcontext was used).
484ae1fc9baSNico Weber       // Again, consider the entire stack range to be reachable.
485ae1fc9baSNico Weber       LOG_THREADS("WARNING: stack pointer not in stack range.\n");
486ae1fc9baSNico Weber       uptr page_size = GetPageSizeCached();
487ae1fc9baSNico Weber       int skipped = 0;
488ae1fc9baSNico Weber       while (stack_begin < stack_end &&
489ae1fc9baSNico Weber              !IsAccessibleMemoryRange(stack_begin, 1)) {
490ae1fc9baSNico Weber         skipped++;
491ae1fc9baSNico Weber         stack_begin += page_size;
492ae1fc9baSNico Weber       }
4933142dff7SVitaly Buka       LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n", skipped,
4943142dff7SVitaly Buka                   (void *)stack_begin, (void *)stack_end);
495ae1fc9baSNico Weber     } else {
496ae1fc9baSNico Weber       // Shrink the stack range to ignore out-of-scope values.
497ae1fc9baSNico Weber       stack_begin = sp;
498ae1fc9baSNico Weber     }
499d60fdc1cSVitaly Buka     ScanForPointers(stack_begin, stack_end, frontier, "STACK", kReachable,
500d60fdc1cSVitaly Buka                     accessor);
501af210ee5SKirill Stoimenov     GetThreadExtraStackRangesLocked(os_id, &extra_ranges);
5020138adb6SVitaly Buka     ScanRanges(extra_ranges, frontier, "FAKE STACK", accessor);
503ae1fc9baSNico Weber   }
504ae1fc9baSNico Weber 
505ae1fc9baSNico Weber   if (flags()->use_tls) {
506ae1fc9baSNico Weber     if (tls_begin) {
507629b40daSMartin Liska       LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
508ae1fc9baSNico Weber       // If the tls and cache ranges don't overlap, scan full tls range,
509ae1fc9baSNico Weber       // otherwise, only scan the non-overlapping portions
510ae1fc9baSNico Weber       if (cache_begin == cache_end || tls_end < cache_begin ||
511ae1fc9baSNico Weber           tls_begin > cache_end) {
512d60fdc1cSVitaly Buka         ScanForPointers(tls_begin, tls_end, frontier, "TLS", kReachable,
513d60fdc1cSVitaly Buka                         accessor);
514ae1fc9baSNico Weber       } else {
515ae1fc9baSNico Weber         if (tls_begin < cache_begin)
516d60fdc1cSVitaly Buka           ScanForPointers(tls_begin, cache_begin, frontier, "TLS", kReachable,
517d60fdc1cSVitaly Buka                           accessor);
518ae1fc9baSNico Weber         if (tls_end > cache_end)
519d60fdc1cSVitaly Buka           ScanForPointers(cache_end, tls_end, frontier, "TLS", kReachable,
520d60fdc1cSVitaly Buka                           accessor);
521ae1fc9baSNico Weber       }
522ae1fc9baSNico Weber     }
523484ec6beSVy Nguyen #    if SANITIZER_ANDROID
524b5fa4feeSVitaly Buka     extra_ranges.clear();
525484ec6beSVy Nguyen     auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_idd*/,
526484ec6beSVy Nguyen                    void *arg) -> void {
527b5fa4feeSVitaly Buka       reinterpret_cast<InternalMmapVector<Range> *>(arg)->push_back(
528b5fa4feeSVitaly Buka           {reinterpret_cast<uptr>(dtls_begin),
529b5fa4feeSVitaly Buka            reinterpret_cast<uptr>(dtls_end)});
530484ec6beSVy Nguyen     };
531b5fa4feeSVitaly Buka     ScanRanges(extra_ranges, frontier, "DTLS", accessor);
532484ec6beSVy Nguyen     // FIXME: There might be a race-condition here (and in Bionic) if the
533484ec6beSVy Nguyen     // thread is suspended in the middle of updating its DTLS. IOWs, we
534484ec6beSVy Nguyen     // could scan already freed memory. (probably fine for now)
535484ec6beSVy Nguyen     __libc_iterate_dynamic_tls(os_id, cb, frontier);
536484ec6beSVy Nguyen #    else
537ae1fc9baSNico Weber     if (dtls && !DTLSInDestruction(dtls)) {
538adfefa55SVitaly Buka       ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
539adfefa55SVitaly Buka         uptr dtls_beg = dtv.beg;
540adfefa55SVitaly Buka         uptr dtls_end = dtls_beg + dtv.size;
541ae1fc9baSNico Weber         if (dtls_beg < dtls_end) {
542629b40daSMartin Liska           LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
543629b40daSMartin Liska                       (void *)dtls_end);
544d60fdc1cSVitaly Buka           ScanForPointers(dtls_beg, dtls_end, frontier, "DTLS", kReachable,
545d60fdc1cSVitaly Buka                           accessor);
546ae1fc9baSNico Weber         }
547adfefa55SVitaly Buka       });
548ae1fc9baSNico Weber     } else {
549ae1fc9baSNico Weber       // We are handling a thread with DTLS under destruction. Log about
550ae1fc9baSNico Weber       // this and continue.
551629b40daSMartin Liska       LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
552ae1fc9baSNico Weber     }
553484ec6beSVy Nguyen #    endif
554ae1fc9baSNico Weber   }
555ae1fc9baSNico Weber }
556dd922bc2SDan Liew 
5573142dff7SVitaly Buka static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
5583142dff7SVitaly Buka                            Frontier *frontier, tid_t caller_tid,
5593142dff7SVitaly Buka                            uptr caller_sp) {
560f4c60883SVitaly Buka   InternalMmapVector<tid_t> done_threads;
5613142dff7SVitaly Buka   InternalMmapVector<uptr> registers;
5623142dff7SVitaly Buka   InternalMmapVector<Range> extra_ranges;
5633142dff7SVitaly Buka   for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
5643142dff7SVitaly Buka     registers.clear();
5653142dff7SVitaly Buka     extra_ranges.clear();
5663142dff7SVitaly Buka 
5673142dff7SVitaly Buka     const tid_t os_id = suspended_threads.GetThreadID(i);
5683142dff7SVitaly Buka     uptr sp = 0;
5693142dff7SVitaly Buka     PtraceRegistersStatus have_registers =
5703142dff7SVitaly Buka         suspended_threads.GetRegistersAndSP(i, &registers, &sp);
5713142dff7SVitaly Buka     if (have_registers != REGISTERS_AVAILABLE) {
572*ac38ab59SFangrui Song       VReport(1, "Unable to get registers from thread %llu.\n", os_id);
5733142dff7SVitaly Buka       // If unable to get SP, consider the entire stack to be reachable unless
5743142dff7SVitaly Buka       // GetRegistersAndSP failed with ESRCH.
5753142dff7SVitaly Buka       if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
5763142dff7SVitaly Buka         continue;
5773142dff7SVitaly Buka       sp = 0;
5783142dff7SVitaly Buka     }
5793142dff7SVitaly Buka 
5803142dff7SVitaly Buka     if (os_id == caller_tid)
5813142dff7SVitaly Buka       sp = caller_sp;
5823142dff7SVitaly Buka 
583d60fdc1cSVitaly Buka     DirectMemoryAccessor accessor;
584d60fdc1cSVitaly Buka     ProcessThread(os_id, sp, registers, extra_ranges, frontier, accessor);
585f4c60883SVitaly Buka     if (flags()->use_detached)
586f4c60883SVitaly Buka       done_threads.push_back(os_id);
587f4c60883SVitaly Buka   }
588f4c60883SVitaly Buka 
589f4c60883SVitaly Buka   if (flags()->use_detached) {
590f4c60883SVitaly Buka     CopyMemoryAccessor accessor;
591f4c60883SVitaly Buka     InternalMmapVector<tid_t> known_threads;
592f4c60883SVitaly Buka     GetRunningThreadsLocked(&known_threads);
593f4c60883SVitaly Buka     Sort(done_threads.data(), done_threads.size());
594f4c60883SVitaly Buka     for (tid_t os_id : known_threads) {
595f4c60883SVitaly Buka       registers.clear();
596f4c60883SVitaly Buka       extra_ranges.clear();
597f4c60883SVitaly Buka 
598f4c60883SVitaly Buka       uptr i = InternalLowerBound(done_threads, os_id);
599f4c60883SVitaly Buka       if (i >= done_threads.size() || done_threads[i] != os_id) {
600f4c60883SVitaly Buka         uptr sp = (os_id == caller_tid) ? caller_sp : 0;
601f4c60883SVitaly Buka         ProcessThread(os_id, sp, registers, extra_ranges, frontier, accessor);
602f4c60883SVitaly Buka       }
603f4c60883SVitaly Buka     }
6043142dff7SVitaly Buka   }
6053142dff7SVitaly Buka 
606dd922bc2SDan Liew   // Add pointers reachable from ThreadContexts
607dd922bc2SDan Liew   ProcessThreadRegistry(frontier);
608ae1fc9baSNico Weber }
609ae1fc9baSNico Weber 
61090a10f00SRoland McGrath #  endif  // SANITIZER_FUCHSIA
61190a10f00SRoland McGrath 
61299dc6838SVitaly Buka // A map that contains [region_begin, region_end) pairs.
61399dc6838SVitaly Buka using RootRegions = DenseMap<detail::DenseMapPair<uptr, uptr>, uptr>;
61499dc6838SVitaly Buka 
61599dc6838SVitaly Buka static RootRegions &GetRootRegionsLocked() {
61699dc6838SVitaly Buka   global_mutex.CheckLocked();
61799dc6838SVitaly Buka   static RootRegions *regions = nullptr;
61899dc6838SVitaly Buka   alignas(RootRegions) static char placeholder[sizeof(RootRegions)];
61999dc6838SVitaly Buka   if (!regions)
62099dc6838SVitaly Buka     regions = new (placeholder) RootRegions();
62199dc6838SVitaly Buka   return *regions;
62299dc6838SVitaly Buka }
62399dc6838SVitaly Buka 
62499dc6838SVitaly Buka bool HasRootRegions() { return !GetRootRegionsLocked().empty(); }
6250a6aec2fSVitaly Buka 
6260a6aec2fSVitaly Buka void ScanRootRegions(Frontier *frontier,
6270a6aec2fSVitaly Buka                      const InternalMmapVectorNoCtor<Region> &mapped_regions) {
6280375a2dcSVitaly Buka   if (!flags()->use_root_regions)
6290a6aec2fSVitaly Buka     return;
6300a6aec2fSVitaly Buka 
63199dc6838SVitaly Buka   InternalMmapVector<Region> regions;
63299dc6838SVitaly Buka   GetRootRegionsLocked().forEach([&](const auto &kv) {
63399dc6838SVitaly Buka     regions.push_back({kv.first.first, kv.first.second});
63499dc6838SVitaly Buka     return true;
63599dc6838SVitaly Buka   });
63699dc6838SVitaly Buka 
6370375a2dcSVitaly Buka   InternalMmapVector<Region> intersection;
63899dc6838SVitaly Buka   Intersect(mapped_regions, regions, intersection);
6390375a2dcSVitaly Buka 
6400375a2dcSVitaly Buka   for (const Region &r : intersection) {
6410375a2dcSVitaly Buka     LOG_POINTERS("Root region intersects with mapped region at %p-%p\n",
6420375a2dcSVitaly Buka                  (void *)r.begin, (void *)r.end);
6430375a2dcSVitaly Buka     ScanRangeForPointers(r.begin, r.end, frontier, "ROOT", kReachable);
6440375a2dcSVitaly Buka   }
6450a6aec2fSVitaly Buka }
6460a6aec2fSVitaly Buka 
647ae1fc9baSNico Weber // Scans root regions for heap pointers.
648ae1fc9baSNico Weber static void ProcessRootRegions(Frontier *frontier) {
64919b7b93dSVitaly Buka   if (!flags()->use_root_regions || !HasRootRegions())
650b79ea567SVitaly Buka     return;
65119b7b93dSVitaly Buka   MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
65219b7b93dSVitaly Buka   MemoryMappedSegment segment;
653506923a1SVitaly Buka   InternalMmapVector<Region> mapped_regions;
65419b7b93dSVitaly Buka   while (proc_maps.Next(&segment))
65519b7b93dSVitaly Buka     if (segment.IsReadable())
65619b7b93dSVitaly Buka       mapped_regions.push_back({segment.start, segment.end});
65719b7b93dSVitaly Buka   ScanRootRegions(frontier, mapped_regions);
658ae1fc9baSNico Weber }
659ae1fc9baSNico Weber 
660ae1fc9baSNico Weber static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
661ae1fc9baSNico Weber   while (frontier->size()) {
662ae1fc9baSNico Weber     uptr next_chunk = frontier->back();
663ae1fc9baSNico Weber     frontier->pop_back();
664ae1fc9baSNico Weber     LsanMetadata m(next_chunk);
665ae1fc9baSNico Weber     ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
666ae1fc9baSNico Weber                          "HEAP", tag);
667ae1fc9baSNico Weber   }
668ae1fc9baSNico Weber }
669ae1fc9baSNico Weber 
670ae1fc9baSNico Weber // ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
671ae1fc9baSNico Weber // which are reachable from it as indirectly leaked.
672ae1fc9baSNico Weber static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
673ae1fc9baSNico Weber   chunk = GetUserBegin(chunk);
674ae1fc9baSNico Weber   LsanMetadata m(chunk);
675ae1fc9baSNico Weber   if (m.allocated() && m.tag() != kReachable) {
676ae1fc9baSNico Weber     ScanRangeForPointers(chunk, chunk + m.requested_size(),
677ae1fc9baSNico Weber                          /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
678ae1fc9baSNico Weber   }
679ae1fc9baSNico Weber }
680ae1fc9baSNico Weber 
6819a023701SVitaly Buka static void IgnoredSuppressedCb(uptr chunk, void *arg) {
6829a023701SVitaly Buka   CHECK(arg);
6839a023701SVitaly Buka   chunk = GetUserBegin(chunk);
6849a023701SVitaly Buka   LsanMetadata m(chunk);
6859a023701SVitaly Buka   if (!m.allocated() || m.tag() == kIgnored)
6869a023701SVitaly Buka     return;
6879a023701SVitaly Buka 
6889a023701SVitaly Buka   const InternalMmapVector<u32> &suppressed =
6899a023701SVitaly Buka       *static_cast<const InternalMmapVector<u32> *>(arg);
6909a023701SVitaly Buka   uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
6919a023701SVitaly Buka   if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
6929a023701SVitaly Buka     return;
6939a023701SVitaly Buka 
694629b40daSMartin Liska   LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
695629b40daSMartin Liska                (void *)(chunk + m.requested_size()), m.requested_size());
6969a023701SVitaly Buka   m.set_tag(kIgnored);
6979a023701SVitaly Buka }
6989a023701SVitaly Buka 
699ae1fc9baSNico Weber // ForEachChunk callback. If chunk is marked as ignored, adds its address to
700ae1fc9baSNico Weber // frontier.
701ae1fc9baSNico Weber static void CollectIgnoredCb(uptr chunk, void *arg) {
702ae1fc9baSNico Weber   CHECK(arg);
703ae1fc9baSNico Weber   chunk = GetUserBegin(chunk);
704ae1fc9baSNico Weber   LsanMetadata m(chunk);
705ae1fc9baSNico Weber   if (m.allocated() && m.tag() == kIgnored) {
706629b40daSMartin Liska     LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
707629b40daSMartin Liska                  (void *)(chunk + m.requested_size()), m.requested_size());
708ae1fc9baSNico Weber     reinterpret_cast<Frontier *>(arg)->push_back(chunk);
709ae1fc9baSNico Weber   }
710ae1fc9baSNico Weber }
711ae1fc9baSNico Weber 
712ae1fc9baSNico Weber // Sets the appropriate tag on each chunk.
713aae707cdSRoland McGrath static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
71439db4919SWiktor Garbacz                               Frontier *frontier, tid_t caller_tid,
71539db4919SWiktor Garbacz                               uptr caller_sp) {
7169a023701SVitaly Buka   const InternalMmapVector<u32> &suppressed_stacks =
7179a023701SVitaly Buka       GetSuppressionContext()->GetSortedSuppressedStacks();
7189a023701SVitaly Buka   if (!suppressed_stacks.empty()) {
7199a023701SVitaly Buka     ForEachChunk(IgnoredSuppressedCb,
7209a023701SVitaly Buka                  const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
7219a023701SVitaly Buka   }
722aae707cdSRoland McGrath   ForEachChunk(CollectIgnoredCb, frontier);
723aae707cdSRoland McGrath   ProcessGlobalRegions(frontier);
72439db4919SWiktor Garbacz   ProcessThreads(suspended_threads, frontier, caller_tid, caller_sp);
725aae707cdSRoland McGrath   ProcessRootRegions(frontier);
726aae707cdSRoland McGrath   FloodFillTag(frontier, kReachable);
727ae1fc9baSNico Weber 
728ae1fc9baSNico Weber   // The check here is relatively expensive, so we do this in a separate flood
729ae1fc9baSNico Weber   // fill. That way we can skip the check for chunks that are reachable
730ae1fc9baSNico Weber   // otherwise.
731ae1fc9baSNico Weber   LOG_POINTERS("Processing platform-specific allocations.\n");
732aae707cdSRoland McGrath   ProcessPlatformSpecificAllocations(frontier);
733aae707cdSRoland McGrath   FloodFillTag(frontier, kReachable);
734ae1fc9baSNico Weber 
735ae1fc9baSNico Weber   // Iterate over leaked chunks and mark those that are reachable from other
736ae1fc9baSNico Weber   // leaked chunks.
737ae1fc9baSNico Weber   LOG_POINTERS("Scanning leaked chunks.\n");
738ae1fc9baSNico Weber   ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
739ae1fc9baSNico Weber }
740ae1fc9baSNico Weber 
741ae1fc9baSNico Weber // ForEachChunk callback. Resets the tags to pre-leak-check state.
742ae1fc9baSNico Weber static void ResetTagsCb(uptr chunk, void *arg) {
743ae1fc9baSNico Weber   (void)arg;
744ae1fc9baSNico Weber   chunk = GetUserBegin(chunk);
745ae1fc9baSNico Weber   LsanMetadata m(chunk);
746ae1fc9baSNico Weber   if (m.allocated() && m.tag() != kIgnored)
747ae1fc9baSNico Weber     m.set_tag(kDirectlyLeaked);
748ae1fc9baSNico Weber }
749ae1fc9baSNico Weber 
750ae1fc9baSNico Weber // ForEachChunk callback. Aggregates information about unreachable chunks into
751ae1fc9baSNico Weber // a LeakReport.
752ae1fc9baSNico Weber static void CollectLeaksCb(uptr chunk, void *arg) {
753ae1fc9baSNico Weber   CHECK(arg);
754f72e5094SVitaly Buka   LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
755ae1fc9baSNico Weber   chunk = GetUserBegin(chunk);
756ae1fc9baSNico Weber   LsanMetadata m(chunk);
757b79ea567SVitaly Buka   if (!m.allocated())
758b79ea567SVitaly Buka     return;
759f72e5094SVitaly Buka   if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
760f72e5094SVitaly Buka     leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
761ae1fc9baSNico Weber }
762ae1fc9baSNico Weber 
7639a5261efSVitaly Buka void LeakSuppressionContext::PrintMatchedSuppressions() {
764ae1fc9baSNico Weber   InternalMmapVector<Suppression *> matched;
7659a5261efSVitaly Buka   context.GetMatched(&matched);
766ae1fc9baSNico Weber   if (!matched.size())
767ae1fc9baSNico Weber     return;
768ae1fc9baSNico Weber   const char *line = "-----------------------------------------------------";
769ae1fc9baSNico Weber   Printf("%s\n", line);
770ae1fc9baSNico Weber   Printf("Suppressions used:\n");
771ae1fc9baSNico Weber   Printf("  count      bytes template\n");
7729a5261efSVitaly Buka   for (uptr i = 0; i < matched.size(); i++) {
7739a5261efSVitaly Buka     Printf("%7zu %10zu %s\n",
7749a5261efSVitaly Buka            static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
7759a5261efSVitaly Buka            matched[i]->weight, matched[i]->templ);
7769a5261efSVitaly Buka   }
777ae1fc9baSNico Weber   Printf("%s\n\n", line);
778ae1fc9baSNico Weber }
779ae1fc9baSNico Weber 
78090a10f00SRoland McGrath #  if SANITIZER_FUCHSIA
78190a10f00SRoland McGrath 
78290a10f00SRoland McGrath // Fuchsia provides a libc interface that guarantees all threads are
78390a10f00SRoland McGrath // covered, and SuspendedThreadList is never really used.
7843137b6a2SHans Wennborg static bool ReportUnsuspendedThreads(const SuspendedThreadsList &) {
7853137b6a2SHans Wennborg   return true;
7863137b6a2SHans Wennborg }
78790a10f00SRoland McGrath 
78890a10f00SRoland McGrath #  else  // !SANITIZER_FUCHSIA
78990a10f00SRoland McGrath 
7909766ce4dSVitaly Buka static bool ReportUnsuspendedThreads(
791ae1fc9baSNico Weber     const SuspendedThreadsList &suspended_threads) {
792ae1fc9baSNico Weber   InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
793ae1fc9baSNico Weber   for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
794ae1fc9baSNico Weber     threads[i] = suspended_threads.GetThreadID(i);
795ae1fc9baSNico Weber 
796ae1fc9baSNico Weber   Sort(threads.data(), threads.size());
797eb2db81eSVitaly Buka 
798b2b0e6c0SVitaly Buka   InternalMmapVector<tid_t> known_threads;
799b2b0e6c0SVitaly Buka   GetRunningThreadsLocked(&known_threads);
800c184423eSKirill Stoimenov 
8019766ce4dSVitaly Buka   bool succeded = true;
802b2b0e6c0SVitaly Buka   for (auto os_id : known_threads) {
803c184423eSKirill Stoimenov     uptr i = InternalLowerBound(threads, os_id);
8049766ce4dSVitaly Buka     if (i >= threads.size() || threads[i] != os_id) {
8059766ce4dSVitaly Buka       succeded = false;
806c184423eSKirill Stoimenov       Report(
807c184423eSKirill Stoimenov           "Running thread %zu was not suspended. False leaks are possible.\n",
808c184423eSKirill Stoimenov           os_id);
809c184423eSKirill Stoimenov     }
810ae1fc9baSNico Weber   }
8119766ce4dSVitaly Buka   return succeded;
8129766ce4dSVitaly Buka }
813ae1fc9baSNico Weber 
81490a10f00SRoland McGrath #  endif  // !SANITIZER_FUCHSIA
81590a10f00SRoland McGrath 
816ae1fc9baSNico Weber static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
817ae1fc9baSNico Weber                                   void *arg) {
818ae1fc9baSNico Weber   CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
819ae1fc9baSNico Weber   CHECK(param);
820ae1fc9baSNico Weber   CHECK(!param->success);
8219766ce4dSVitaly Buka   if (!ReportUnsuspendedThreads(suspended_threads)) {
8229766ce4dSVitaly Buka     switch (flags()->thread_suspend_fail) {
8239766ce4dSVitaly Buka       case 0:
8249766ce4dSVitaly Buka         param->success = true;
8259766ce4dSVitaly Buka         return;
8269766ce4dSVitaly Buka       case 1:
8279766ce4dSVitaly Buka         break;
8289766ce4dSVitaly Buka       case 2:
8299766ce4dSVitaly Buka         // Will crash on return.
8309766ce4dSVitaly Buka         return;
8319766ce4dSVitaly Buka     }
8329766ce4dSVitaly Buka   }
83339db4919SWiktor Garbacz   ClassifyAllChunks(suspended_threads, &param->frontier, param->caller_tid,
83439db4919SWiktor Garbacz                     param->caller_sp);
835f72e5094SVitaly Buka   ForEachChunk(CollectLeaksCb, &param->leaks);
836ae1fc9baSNico Weber   // Clean up for subsequent leak checks. This assumes we did not overwrite any
837ae1fc9baSNico Weber   // kIgnored tags.
838ae1fc9baSNico Weber   ForEachChunk(ResetTagsCb, nullptr);
839ae1fc9baSNico Weber   param->success = true;
840ae1fc9baSNico Weber }
841ae1fc9baSNico Weber 
8429b25b806SVitaly Buka static bool PrintResults(LeakReport &report) {
8439b25b806SVitaly Buka   uptr unsuppressed_count = report.UnsuppressedLeakCount();
8449b25b806SVitaly Buka   if (unsuppressed_count) {
8459b25b806SVitaly Buka     Decorator d;
8469b25b806SVitaly Buka     Printf(
8479b25b806SVitaly Buka         "\n"
8489b25b806SVitaly Buka         "================================================================="
8499b25b806SVitaly Buka         "\n");
8509b25b806SVitaly Buka     Printf("%s", d.Error());
8519b25b806SVitaly Buka     Report("ERROR: LeakSanitizer: detected memory leaks\n");
8529b25b806SVitaly Buka     Printf("%s", d.Default());
8539b25b806SVitaly Buka     report.ReportTopLeaks(flags()->max_leaks);
8549b25b806SVitaly Buka   }
8559b25b806SVitaly Buka   if (common_flags()->print_suppressions)
8569b25b806SVitaly Buka     GetSuppressionContext()->PrintMatchedSuppressions();
857fa81868fSVitaly Buka   if (unsuppressed_count)
8589b25b806SVitaly Buka     report.PrintSummary();
859fa81868fSVitaly Buka   if ((unsuppressed_count && common_flags()->verbosity >= 2) ||
860fa81868fSVitaly Buka       flags()->log_threads)
861fa81868fSVitaly Buka     PrintThreads();
862fa81868fSVitaly Buka   return unsuppressed_count;
8639b25b806SVitaly Buka }
8649b25b806SVitaly Buka 
865e1cff8bfSVitaly Buka static bool CheckForLeaksOnce() {
866ca3c58f0SVitaly Buka   if (&__lsan_is_turned_off && __lsan_is_turned_off()) {
867453d373eSVitaly Buka     VReport(1, "LeakSanitizer is disabled\n");
868ae1fc9baSNico Weber     return false;
869ca3c58f0SVitaly Buka   }
870453d373eSVitaly Buka   VReport(1, "LeakSanitizer: checking for leaks\n");
8719a023701SVitaly Buka   // Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
8729a023701SVitaly Buka   // suppressions. However if a stack id was previously suppressed, it should be
8739a023701SVitaly Buka   // suppressed in future checks as well.
8749a023701SVitaly Buka   for (int i = 0;; ++i) {
875ae1fc9baSNico Weber     EnsureMainThreadIDIsCorrect();
876ae1fc9baSNico Weber     CheckForLeaksParam param;
87739db4919SWiktor Garbacz     // Capture calling thread's stack pointer early, to avoid false negatives.
87839db4919SWiktor Garbacz     // Old frame with dead pointers might be overlapped by new frame inside
87939db4919SWiktor Garbacz     // CheckForLeaks which does not use bytes with pointers before the
88039db4919SWiktor Garbacz     // threads are suspended and stack pointers captured.
88139db4919SWiktor Garbacz     param.caller_tid = GetTid();
88239db4919SWiktor Garbacz     param.caller_sp = reinterpret_cast<uptr>(__builtin_frame_address(0));
883f1b6bd40SEvgeniy Stepanov     LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
884ae1fc9baSNico Weber     if (!param.success) {
885ae1fc9baSNico Weber       Report("LeakSanitizer has encountered a fatal error.\n");
886ae1fc9baSNico Weber       Report(
887ae1fc9baSNico Weber           "HINT: For debugging, try setting environment variable "
888ae1fc9baSNico Weber           "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
889ae1fc9baSNico Weber       Report(
8909a023701SVitaly Buka           "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
8919a023701SVitaly Buka           "etc)\n");
892ae1fc9baSNico Weber       Die();
893ae1fc9baSNico Weber     }
894f72e5094SVitaly Buka     LeakReport leak_report;
895f72e5094SVitaly Buka     leak_report.AddLeakedChunks(param.leaks);
896f72e5094SVitaly Buka 
8979a023701SVitaly Buka     // No new suppressions stacks, so rerun will not help and we can report.
898f72e5094SVitaly Buka     if (!leak_report.ApplySuppressions())
899f72e5094SVitaly Buka       return PrintResults(leak_report);
9009a023701SVitaly Buka 
9019a023701SVitaly Buka     // No indirect leaks to report, so we are done here.
902f72e5094SVitaly Buka     if (!leak_report.IndirectUnsuppressedLeakCount())
903f72e5094SVitaly Buka       return PrintResults(leak_report);
9049a023701SVitaly Buka 
9059a023701SVitaly Buka     if (i >= 8) {
9069a023701SVitaly Buka       Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
907f72e5094SVitaly Buka       return PrintResults(leak_report);
9089a023701SVitaly Buka     }
9099a023701SVitaly Buka 
9109a023701SVitaly Buka     // We found a new previously unseen suppressed call stack. Rerun to make
9119a023701SVitaly Buka     // sure it does not hold indirect leaks.
9129a023701SVitaly Buka     VReport(1, "Rerun with %zu suppressed stacks.",
9139a023701SVitaly Buka             GetSuppressionContext()->GetSortedSuppressedStacks().size());
9149a023701SVitaly Buka   }
915ae1fc9baSNico Weber }
916ae1fc9baSNico Weber 
917e1cff8bfSVitaly Buka static bool CheckForLeaks() {
918e1cff8bfSVitaly Buka   int leaking_tries = 0;
919e1cff8bfSVitaly Buka   for (int i = 0; i < flags()->tries; ++i) leaking_tries += CheckForLeaksOnce();
920e1cff8bfSVitaly Buka   return leaking_tries == flags()->tries;
921e1cff8bfSVitaly Buka }
922e1cff8bfSVitaly Buka 
923ae1fc9baSNico Weber static bool has_reported_leaks = false;
924ae1fc9baSNico Weber bool HasReportedLeaks() { return has_reported_leaks; }
925ae1fc9baSNico Weber 
926ae1fc9baSNico Weber void DoLeakCheck() {
92756debbf5SDmitry Vyukov   Lock l(&global_mutex);
928ae1fc9baSNico Weber   static bool already_done;
929b79ea567SVitaly Buka   if (already_done)
930b79ea567SVitaly Buka     return;
931ae1fc9baSNico Weber   already_done = true;
932ae1fc9baSNico Weber   has_reported_leaks = CheckForLeaks();
933b79ea567SVitaly Buka   if (has_reported_leaks)
934b79ea567SVitaly Buka     HandleLeaks();
935ae1fc9baSNico Weber }
936ae1fc9baSNico Weber 
937ae1fc9baSNico Weber static int DoRecoverableLeakCheck() {
93856debbf5SDmitry Vyukov   Lock l(&global_mutex);
939ae1fc9baSNico Weber   bool have_leaks = CheckForLeaks();
940ae1fc9baSNico Weber   return have_leaks ? 1 : 0;
941ae1fc9baSNico Weber }
942ae1fc9baSNico Weber 
943ae1fc9baSNico Weber void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
944ae1fc9baSNico Weber 
945ae1fc9baSNico Weber ///// LeakReport implementation. /////
946ae1fc9baSNico Weber 
947ae1fc9baSNico Weber // A hard limit on the number of distinct leaks, to avoid quadratic complexity
948ae1fc9baSNico Weber // in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
949ae1fc9baSNico Weber // in real-world applications.
950f72e5094SVitaly Buka // FIXME: Get rid of this limit by moving logic into DedupLeaks.
951ae1fc9baSNico Weber const uptr kMaxLeaksConsidered = 5000;
952ae1fc9baSNico Weber 
953f72e5094SVitaly Buka void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
954f72e5094SVitaly Buka   for (const LeakedChunk &leak : chunks) {
955f72e5094SVitaly Buka     uptr chunk = leak.chunk;
956f72e5094SVitaly Buka     u32 stack_trace_id = leak.stack_trace_id;
957f72e5094SVitaly Buka     uptr leaked_size = leak.leaked_size;
958f72e5094SVitaly Buka     ChunkTag tag = leak.tag;
959ae1fc9baSNico Weber     CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
960051d766bSVitaly Buka 
961051d766bSVitaly Buka     if (u32 resolution = flags()->resolution) {
962ca0036dfSVitaly Buka       StackTrace stack = StackDepotGet(stack_trace_id);
963051d766bSVitaly Buka       stack.size = Min(stack.size, resolution);
964051d766bSVitaly Buka       stack_trace_id = StackDepotPut(stack);
965051d766bSVitaly Buka     }
966051d766bSVitaly Buka 
967ae1fc9baSNico Weber     bool is_directly_leaked = (tag == kDirectlyLeaked);
968ae1fc9baSNico Weber     uptr i;
969ae1fc9baSNico Weber     for (i = 0; i < leaks_.size(); i++) {
970ae1fc9baSNico Weber       if (leaks_[i].stack_trace_id == stack_trace_id &&
971ae1fc9baSNico Weber           leaks_[i].is_directly_leaked == is_directly_leaked) {
972ae1fc9baSNico Weber         leaks_[i].hit_count++;
973ae1fc9baSNico Weber         leaks_[i].total_size += leaked_size;
974ae1fc9baSNico Weber         break;
975ae1fc9baSNico Weber       }
976ae1fc9baSNico Weber     }
977ae1fc9baSNico Weber     if (i == leaks_.size()) {
978b79ea567SVitaly Buka       if (leaks_.size() == kMaxLeaksConsidered)
979b79ea567SVitaly Buka         return;
980f72e5094SVitaly Buka       Leak leak = {next_id_++,         /* hit_count */ 1,
981f72e5094SVitaly Buka                    leaked_size,        stack_trace_id,
982f72e5094SVitaly Buka                    is_directly_leaked, /* is_suppressed */ false};
983ae1fc9baSNico Weber       leaks_.push_back(leak);
984ae1fc9baSNico Weber     }
985ae1fc9baSNico Weber     if (flags()->report_objects) {
986eb3be660SKirill Stoimenov       LeakedObject obj = {leaks_[i].id, GetUserAddr(chunk), leaked_size};
987ae1fc9baSNico Weber       leaked_objects_.push_back(obj);
988ae1fc9baSNico Weber     }
989ae1fc9baSNico Weber   }
990f72e5094SVitaly Buka }
991ae1fc9baSNico Weber 
992ae1fc9baSNico Weber static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
993ae1fc9baSNico Weber   if (leak1.is_directly_leaked == leak2.is_directly_leaked)
994ae1fc9baSNico Weber     return leak1.total_size > leak2.total_size;
995ae1fc9baSNico Weber   else
996ae1fc9baSNico Weber     return leak1.is_directly_leaked;
997ae1fc9baSNico Weber }
998ae1fc9baSNico Weber 
999ae1fc9baSNico Weber void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
1000ae1fc9baSNico Weber   CHECK(leaks_.size() <= kMaxLeaksConsidered);
1001ae1fc9baSNico Weber   Printf("\n");
1002ae1fc9baSNico Weber   if (leaks_.size() == kMaxLeaksConsidered)
1003b79ea567SVitaly Buka     Printf(
1004b79ea567SVitaly Buka         "Too many leaks! Only the first %zu leaks encountered will be "
1005ae1fc9baSNico Weber         "reported.\n",
1006ae1fc9baSNico Weber         kMaxLeaksConsidered);
1007ae1fc9baSNico Weber 
1008ae1fc9baSNico Weber   uptr unsuppressed_count = UnsuppressedLeakCount();
1009ae1fc9baSNico Weber   if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
1010ae1fc9baSNico Weber     Printf("The %zu top leak(s):\n", num_leaks_to_report);
1011ae1fc9baSNico Weber   Sort(leaks_.data(), leaks_.size(), &LeakComparator);
1012ae1fc9baSNico Weber   uptr leaks_reported = 0;
1013ae1fc9baSNico Weber   for (uptr i = 0; i < leaks_.size(); i++) {
1014b79ea567SVitaly Buka     if (leaks_[i].is_suppressed)
1015b79ea567SVitaly Buka       continue;
1016ae1fc9baSNico Weber     PrintReportForLeak(i);
1017ae1fc9baSNico Weber     leaks_reported++;
1018b79ea567SVitaly Buka     if (leaks_reported == num_leaks_to_report)
1019b79ea567SVitaly Buka       break;
1020ae1fc9baSNico Weber   }
1021ae1fc9baSNico Weber   if (leaks_reported < unsuppressed_count) {
1022ae1fc9baSNico Weber     uptr remaining = unsuppressed_count - leaks_reported;
1023ae1fc9baSNico Weber     Printf("Omitting %zu more leak(s).\n", remaining);
1024ae1fc9baSNico Weber   }
1025ae1fc9baSNico Weber }
1026ae1fc9baSNico Weber 
1027ae1fc9baSNico Weber void LeakReport::PrintReportForLeak(uptr index) {
1028ae1fc9baSNico Weber   Decorator d;
1029ae1fc9baSNico Weber   Printf("%s", d.Leak());
1030ae1fc9baSNico Weber   Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
1031ae1fc9baSNico Weber          leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
1032ae1fc9baSNico Weber          leaks_[index].total_size, leaks_[index].hit_count);
1033ae1fc9baSNico Weber   Printf("%s", d.Default());
1034ae1fc9baSNico Weber 
1035051d766bSVitaly Buka   CHECK(leaks_[index].stack_trace_id);
1036ca0036dfSVitaly Buka   StackDepotGet(leaks_[index].stack_trace_id).Print();
1037ae1fc9baSNico Weber 
1038ae1fc9baSNico Weber   if (flags()->report_objects) {
1039ae1fc9baSNico Weber     Printf("Objects leaked above:\n");
1040ae1fc9baSNico Weber     PrintLeakedObjectsForLeak(index);
1041ae1fc9baSNico Weber     Printf("\n");
1042ae1fc9baSNico Weber   }
1043ae1fc9baSNico Weber }
1044ae1fc9baSNico Weber 
1045ae1fc9baSNico Weber void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
1046ae1fc9baSNico Weber   u32 leak_id = leaks_[index].id;
1047ae1fc9baSNico Weber   for (uptr j = 0; j < leaked_objects_.size(); j++) {
1048ae1fc9baSNico Weber     if (leaked_objects_[j].leak_id == leak_id)
1049629b40daSMartin Liska       Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,
1050ae1fc9baSNico Weber              leaked_objects_[j].size);
1051ae1fc9baSNico Weber   }
1052ae1fc9baSNico Weber }
1053ae1fc9baSNico Weber 
1054ae1fc9baSNico Weber void LeakReport::PrintSummary() {
1055ae1fc9baSNico Weber   CHECK(leaks_.size() <= kMaxLeaksConsidered);
1056ae1fc9baSNico Weber   uptr bytes = 0, allocations = 0;
1057ae1fc9baSNico Weber   for (uptr i = 0; i < leaks_.size(); i++) {
1058b79ea567SVitaly Buka     if (leaks_[i].is_suppressed)
1059b79ea567SVitaly Buka       continue;
1060ae1fc9baSNico Weber     bytes += leaks_[i].total_size;
1061ae1fc9baSNico Weber     allocations += leaks_[i].hit_count;
1062ae1fc9baSNico Weber   }
1063e0dadf3dSVitaly Buka   InternalScopedString summary;
10645b7dfa96SVitaly Buka   summary.AppendF("%zu byte(s) leaked in %zu allocation(s).", bytes,
1065ae1fc9baSNico Weber                   allocations);
1066ae1fc9baSNico Weber   ReportErrorSummary(summary.data());
1067ae1fc9baSNico Weber }
1068ae1fc9baSNico Weber 
10699a023701SVitaly Buka uptr LeakReport::ApplySuppressions() {
10709a5261efSVitaly Buka   LeakSuppressionContext *suppressions = GetSuppressionContext();
1071cbb9369bSBojun Seo   uptr new_suppressions = 0;
1072ae1fc9baSNico Weber   for (uptr i = 0; i < leaks_.size(); i++) {
1073a9a14990SVitaly Buka     if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
1074a9a14990SVitaly Buka                                leaks_[i].total_size)) {
1075ae1fc9baSNico Weber       leaks_[i].is_suppressed = true;
10769a023701SVitaly Buka       ++new_suppressions;
1077ae1fc9baSNico Weber     }
1078ae1fc9baSNico Weber   }
10799a023701SVitaly Buka   return new_suppressions;
1080ae1fc9baSNico Weber }
1081ae1fc9baSNico Weber 
1082ae1fc9baSNico Weber uptr LeakReport::UnsuppressedLeakCount() {
1083ae1fc9baSNico Weber   uptr result = 0;
1084ae1fc9baSNico Weber   for (uptr i = 0; i < leaks_.size(); i++)
1085b79ea567SVitaly Buka     if (!leaks_[i].is_suppressed)
1086b79ea567SVitaly Buka       result++;
1087ae1fc9baSNico Weber   return result;
1088ae1fc9baSNico Weber }
1089ae1fc9baSNico Weber 
10909a023701SVitaly Buka uptr LeakReport::IndirectUnsuppressedLeakCount() {
10919a023701SVitaly Buka   uptr result = 0;
10929a023701SVitaly Buka   for (uptr i = 0; i < leaks_.size(); i++)
10939a023701SVitaly Buka     if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
10949a023701SVitaly Buka       result++;
10959a023701SVitaly Buka   return result;
10969a023701SVitaly Buka }
10979a023701SVitaly Buka 
1098ae1fc9baSNico Weber }  // namespace __lsan
1099ae1fc9baSNico Weber #else   // CAN_SANITIZE_LEAKS
1100ae1fc9baSNico Weber namespace __lsan {
1101ae1fc9baSNico Weber void InitCommonLsan() {}
1102ae1fc9baSNico Weber void DoLeakCheck() {}
1103ae1fc9baSNico Weber void DoRecoverableLeakCheckVoid() {}
1104ae1fc9baSNico Weber void DisableInThisThread() {}
1105ae1fc9baSNico Weber void EnableInThisThread() {}
1106b79ea567SVitaly Buka }  // namespace __lsan
1107ae1fc9baSNico Weber #endif  // CAN_SANITIZE_LEAKS
1108ae1fc9baSNico Weber 
1109c0fa6322SVitaly Buka using namespace __lsan;
1110ae1fc9baSNico Weber 
1111ae1fc9baSNico Weber extern "C" {
1112ae1fc9baSNico Weber SANITIZER_INTERFACE_ATTRIBUTE
1113ae1fc9baSNico Weber void __lsan_ignore_object(const void *p) {
1114ae1fc9baSNico Weber #if CAN_SANITIZE_LEAKS
1115ae1fc9baSNico Weber   if (!common_flags()->detect_leaks)
1116ae1fc9baSNico Weber     return;
1117ae1fc9baSNico Weber   // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
1118ae1fc9baSNico Weber   // locked.
111956debbf5SDmitry Vyukov   Lock l(&global_mutex);
112039c06024SVitaly Buka   IgnoreObjectResult res = IgnoreObject(p);
1121ae1fc9baSNico Weber   if (res == kIgnoreObjectInvalid)
1122b15cf06fSLeonard Grey     VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
1123ae1fc9baSNico Weber   if (res == kIgnoreObjectAlreadyIgnored)
1124b79ea567SVitaly Buka     VReport(1,
1125b79ea567SVitaly Buka             "__lsan_ignore_object(): "
1126b79ea567SVitaly Buka             "heap object at %p is already being ignored\n",
1127b79ea567SVitaly Buka             p);
1128ae1fc9baSNico Weber   if (res == kIgnoreObjectSuccess)
1129ae1fc9baSNico Weber     VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
1130ae1fc9baSNico Weber #endif  // CAN_SANITIZE_LEAKS
1131ae1fc9baSNico Weber }
1132ae1fc9baSNico Weber 
1133ae1fc9baSNico Weber SANITIZER_INTERFACE_ATTRIBUTE
1134ae1fc9baSNico Weber void __lsan_register_root_region(const void *begin, uptr size) {
1135ae1fc9baSNico Weber #if CAN_SANITIZE_LEAKS
1136629b40daSMartin Liska   VReport(1, "Registered root region at %p of size %zu\n", begin, size);
11378ce66a1fSVitaly Buka   uptr b = reinterpret_cast<uptr>(begin);
11388ce66a1fSVitaly Buka   uptr e = b + size;
11398ce66a1fSVitaly Buka   CHECK_LT(b, e);
11408ce66a1fSVitaly Buka 
11418ce66a1fSVitaly Buka   Lock l(&global_mutex);
114299dc6838SVitaly Buka   ++GetRootRegionsLocked()[{b, e}];
1143ae1fc9baSNico Weber #endif  // CAN_SANITIZE_LEAKS
1144ae1fc9baSNico Weber }
1145ae1fc9baSNico Weber 
1146ae1fc9baSNico Weber SANITIZER_INTERFACE_ATTRIBUTE
1147ae1fc9baSNico Weber void __lsan_unregister_root_region(const void *begin, uptr size) {
1148ae1fc9baSNico Weber #if CAN_SANITIZE_LEAKS
11498ce66a1fSVitaly Buka   uptr b = reinterpret_cast<uptr>(begin);
11508ce66a1fSVitaly Buka   uptr e = b + size;
11518ce66a1fSVitaly Buka   CHECK_LT(b, e);
115299dc6838SVitaly Buka   VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
11538ce66a1fSVitaly Buka 
11548ce66a1fSVitaly Buka   {
115556debbf5SDmitry Vyukov     Lock l(&global_mutex);
115699dc6838SVitaly Buka     if (auto *f = GetRootRegionsLocked().find({b, e})) {
115799dc6838SVitaly Buka       if (--(f->second) == 0)
115899dc6838SVitaly Buka         GetRootRegionsLocked().erase(f);
11598ce66a1fSVitaly Buka       return;
1160ae1fc9baSNico Weber     }
1161ae1fc9baSNico Weber   }
1162ae1fc9baSNico Weber   Report(
1163629b40daSMartin Liska       "__lsan_unregister_root_region(): region at %p of size %zu has not "
1164ae1fc9baSNico Weber       "been registered.\n",
1165ae1fc9baSNico Weber       begin, size);
1166ae1fc9baSNico Weber   Die();
1167ae1fc9baSNico Weber #endif  // CAN_SANITIZE_LEAKS
1168ae1fc9baSNico Weber }
1169ae1fc9baSNico Weber 
1170ae1fc9baSNico Weber SANITIZER_INTERFACE_ATTRIBUTE
1171ae1fc9baSNico Weber void __lsan_disable() {
1172ae1fc9baSNico Weber #if CAN_SANITIZE_LEAKS
1173ae1fc9baSNico Weber   __lsan::DisableInThisThread();
1174ae1fc9baSNico Weber #endif
1175ae1fc9baSNico Weber }
1176ae1fc9baSNico Weber 
1177ae1fc9baSNico Weber SANITIZER_INTERFACE_ATTRIBUTE
1178ae1fc9baSNico Weber void __lsan_enable() {
1179ae1fc9baSNico Weber #if CAN_SANITIZE_LEAKS
1180ae1fc9baSNico Weber   __lsan::EnableInThisThread();
1181ae1fc9baSNico Weber #endif
1182ae1fc9baSNico Weber }
1183ae1fc9baSNico Weber 
1184ae1fc9baSNico Weber SANITIZER_INTERFACE_ATTRIBUTE
1185ae1fc9baSNico Weber void __lsan_do_leak_check() {
1186ae1fc9baSNico Weber #if CAN_SANITIZE_LEAKS
1187ae1fc9baSNico Weber   if (common_flags()->detect_leaks)
1188ae1fc9baSNico Weber     __lsan::DoLeakCheck();
1189ae1fc9baSNico Weber #endif  // CAN_SANITIZE_LEAKS
1190ae1fc9baSNico Weber }
1191ae1fc9baSNico Weber 
1192ae1fc9baSNico Weber SANITIZER_INTERFACE_ATTRIBUTE
1193ae1fc9baSNico Weber int __lsan_do_recoverable_leak_check() {
1194ae1fc9baSNico Weber #if CAN_SANITIZE_LEAKS
1195ae1fc9baSNico Weber   if (common_flags()->detect_leaks)
1196ae1fc9baSNico Weber     return __lsan::DoRecoverableLeakCheck();
1197ae1fc9baSNico Weber #endif  // CAN_SANITIZE_LEAKS
1198ae1fc9baSNico Weber   return 0;
1199ae1fc9baSNico Weber }
1200ae1fc9baSNico Weber 
12012d7fd38cSFangrui Song SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
1202ae1fc9baSNico Weber   return "";
1203ae1fc9baSNico Weber }
1204ae1fc9baSNico Weber 
12052d7fd38cSFangrui Song #if !SANITIZER_SUPPORTS_WEAK_HOOKS
1206e06a81d8SAndrew Ng SANITIZER_INTERFACE_WEAK_DEF(int, __lsan_is_turned_off, void) {
1207ae1fc9baSNico Weber   return 0;
1208ae1fc9baSNico Weber }
1209ae1fc9baSNico Weber 
1210e06a81d8SAndrew Ng SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_suppressions, void) {
1211ae1fc9baSNico Weber   return "";
1212ae1fc9baSNico Weber }
1213ae1fc9baSNico Weber #endif
1214ae1fc9baSNico Weber }  // extern "C"
1215