//=-- lsan_common.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
Mutex global_mutex;

Flags lsan_flags;


void DisableCounterUnderflow() {
  if (common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
}

void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "lsan_flags.inc"
#undef LSAN_FLAG
}
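
// For reference, the entries in lsan_flags.inc that the two functions above
// expand follow the usual sanitizer X-macro form. A representative entry
// (the default value and description text here are illustrative, not quoted
// from the .inc file) looks like:
//   LSAN_FLAG(bool, use_registers, true,
//             "Scan thread registers when looking for pointers.")
// SetDefaults() turns it into `use_registers = true;`, and RegisterLsanFlags()
// turns it into a RegisterFlag(parser, "use_registers", ...) call.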

#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0)

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0)

class LeakSuppressionContext {
  bool parsed = false;
  SuppressionContext context;
  bool suppressed_stacks_sorted = true;
  InternalMmapVector<u32> suppressed_stacks;

  Suppression *GetSuppressionForAddr(uptr addr);
  void LazyInit();

 public:
  LeakSuppressionContext(const char *suppression_types[],
                         int suppression_types_num)
      : context(suppression_types, suppression_types_num) {}

  Suppression *GetSuppressionForStack(u32 stack_trace_id,
                                      const StackTrace &stack);

  const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
    if (!suppressed_stacks_sorted) {
      suppressed_stacks_sorted = true;
      SortAndDedup(suppressed_stacks);
    }
    return suppressed_stacks;
  }
  void PrintMatchedSuppressions();
};

ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
static LeakSuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };
static const char kStdSuppressions[] =
#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // definition.
    "leak:*pthread_exit*\n"
#endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
#if SANITIZER_MAC
    // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
    "leak:*_os_trace*\n"
#endif
    // TLS leak in some glibc versions, described in
    // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
    "leak:*tls_get_addr*\n";
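
// A user-provided suppressions file (passed via LSAN_OPTIONS=suppressions=...)
// uses the same one-rule-per-line format as the built-in defaults above: each
// rule is "leak:" followed by a glob pattern that is matched against the
// module, source file, and function names of frames in the allocation stack
// (see LeakSuppressionContext::GetSuppressionForAddr below). For example, with
// placeholder names:
//   leak:libfoo.so
//   leak:MyLeakyFunction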

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder)
      LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
}

void LeakSuppressionContext::LazyInit() {
  if (!parsed) {
    parsed = true;
    context.ParseFromFile(flags()->suppressions);
    if (&__lsan_default_suppressions)
      context.Parse(__lsan_default_suppressions());
    context.Parse(kStdSuppressions);
  }
}

static LeakSuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

static InternalMmapVectorNoCtor<RootRegion> root_regions;

InternalMmapVectorNoCtor<RootRegion> const *GetRootRegions() {
  return &root_regions;
}

void InitCommonLsan() {
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#if defined(__x86_64__)
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#elif defined(__mips64) && defined(_LP64)
  return ((p >> 40) == 0);
#elif defined(__aarch64__)
  unsigned runtimeVMA =
      (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
  return ((p >> runtimeVMA) == 0);
#else
  return true;
#endif
}
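
// Worked example for the x86_64 branch above: a typical user-space pointer such
// as 0x00007f1234567000 is below 2^47, so (p >> 47) == 0 and it may be treated
// as a heap pointer, while a kernel-half address like 0xffff800000000000 (or
// random data with high bits set) shifts to a non-zero value and is rejected.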

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
               (void *)end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
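  // For example, with pointer_alignment() == 8 and begin == 0x1003, pp %
  // alignment == 3, so pp is rounded up to 0x1008 before the word-by-word scan
  // below.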
  for (; pp + sizeof(void *) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored) continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
          m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
                 (void *)pp, p, (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

// Scans a global range for pointers
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
  uptr allocator_begin = 0, allocator_end = 0;
  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
  if (begin <= allocator_begin && allocator_begin < end) {
    CHECK_LE(allocator_begin, allocator_end);
    CHECK_LE(allocator_end, end);
    if (begin < allocator_begin)
      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                           kReachable);
    if (allocator_end < end)
      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
  } else {
    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
  }
}

void ForEachExtraStackRangeCb(uptr begin, uptr end, void *arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

#if SANITIZER_FUCHSIA

// Fuchsia handles all threads together with its own callback.
static void ProcessThreads(SuspendedThreadsList const &, Frontier *) {}

#else

#if SANITIZER_ANDROID
// FIXME: Move this out into *libcdep.cpp
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
    pid_t, void (*cb)(void *, void *, uptr, void *), void *);
#endif

static void ProcessThreadRegistry(Frontier *frontier) {
  InternalMmapVector<uptr> ptrs;
  GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      GetAdditionalThreadContextPtrs, &ptrs);

  for (uptr i = 0; i < ptrs.size(); ++i) {
    void *ptr = reinterpret_cast<void *>(ptrs[i]);
    uptr chunk = PointsIntoChunk(ptr);
    if (!chunk)
      continue;
    LsanMetadata m(chunk);
    if (!m.allocated())
      continue;

    // Mark as reachable and add to frontier.
    LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
    m.set_tag(kReachable);
    frontier->push_back(chunk);
  }
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalMmapVector<uptr> registers;
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %llu.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %llu not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    PtraceRegistersStatus have_registers =
        suspended_threads.GetRegistersAndSP(i, &registers, &sp);
    if (have_registers != REGISTERS_AVAILABLE) {
      Report("Unable to get registers from thread %llu.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable unless
      // GetRegistersAndSP failed with ESRCH.
      if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers) {
      uptr registers_begin = reinterpret_cast<uptr>(registers.data());
      uptr registers_end =
          reinterpret_cast<uptr>(registers.data() + registers.size());
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);
    }

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
                  (void *)stack_end, (void *)sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, (void *)stack_begin, (void *)stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      if (tls_begin) {
        LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
        // If the tls and cache ranges don't overlap, scan full tls range,
        // otherwise, only scan the non-overlapping portions
        if (cache_begin == cache_end || tls_end < cache_begin ||
            tls_begin > cache_end) {
          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
        } else {
          if (tls_begin < cache_begin)
            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                                 kReachable);
          if (tls_end > cache_end)
            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                                 kReachable);
        }
      }
#if SANITIZER_ANDROID
      auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_idd*/,
                     void *arg) -> void {
        ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
                             reinterpret_cast<uptr>(dtls_end),
                             reinterpret_cast<Frontier *>(arg), "DTLS",
                             kReachable);
      };

      // FIXME: There might be a race-condition here (and in Bionic) if the
      // thread is suspended in the middle of updating its DTLS. IOWs, we
      // could scan already freed memory. (probably fine for now)
      __libc_iterate_dynamic_tls(os_id, cb, frontier);
#else
      if (dtls && !DTLSInDestruction(dtls)) {
        ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
          uptr dtls_beg = dtv.beg;
          uptr dtls_end = dtls_beg + dtv.size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
                        (void *)dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        });
      } else {
        // We are handling a thread with DTLS under destruction. Log about
        // this and continue.
        LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
      }
#endif
    }
  }

  // Add pointers reachable from ThreadContexts
  ProcessThreadRegistry(frontier);
}

#endif // SANITIZER_FUCHSIA

void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
                    uptr region_begin, uptr region_end, bool is_readable) {
  uptr intersection_begin = Max(root_region.begin, region_begin);
  uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
  if (intersection_begin >= intersection_end) return;
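  // Example (illustrative addresses): a root region [0x1000, 0x3000) and a
  // mapped segment [0x2000, 0x4000) intersect in [0x2000, 0x3000), which is
  // the range scanned below.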
  LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
               (void *)root_region.begin,
               (void *)(root_region.begin + root_region.size),
               (void *)region_begin, (void *)region_end,
               is_readable ? "readable" : "unreadable");
  if (is_readable)
    ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
                         kReachable);
}

static void ProcessRootRegion(Frontier *frontier,
                              const RootRegion &root_region) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    ScanRootRegion(frontier, root_region, segment.start, segment.end,
                   segment.IsReadable());
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  for (uptr i = 0; i < root_regions.size(); i++)
    ProcessRootRegion(frontier, root_regions[i]);
}

static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

static void IgnoredSuppressedCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated() || m.tag() == kIgnored)
    return;

  const InternalMmapVector<u32> &suppressed =
      *static_cast<const InternalMmapVector<u32> *>(arg);
  uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
  if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
    return;

  LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
               (void *)(chunk + m.requested_size()), m.requested_size());
  m.set_tag(kIgnored);
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

static uptr GetCallerPC(const StackTrace &stack) {
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}

struct InvalidPCParam {
  Frontier *frontier;
  bool skip_linker_allocations;
};

// ForEachChunk callback. If the caller pc is invalid or is within the linker,
// mark as reachable. Called by ProcessPlatformSpecificAllocations.
static void MarkInvalidPCCb(uptr chunk, void *arg) {
  CHECK(arg);
  InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
    u32 stack_id = m.stack_trace_id();
    uptr caller_pc = 0;
    if (stack_id > 0)
      caller_pc = GetCallerPC(StackDepotGet(stack_id));
    // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
    // it as reachable, as we can't properly report its allocation stack anyway.
    if (caller_pc == 0 || (param->skip_linker_allocations &&
                           GetLinker()->containsAddress(caller_pc))) {
      m.set_tag(kReachable);
      param->frontier->push_back(chunk);
    }
  }
}

// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
// modules accounting etc.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
// On all other platforms, this simply checks to ensure that the caller pc is
// valid before reporting chunks as leaked.
static void ProcessPC(Frontier *frontier) {
  InvalidPCParam arg;
  arg.frontier = frontier;
  arg.skip_linker_allocations =
      flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
  ForEachChunk(MarkInvalidPCCb, &arg);
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
                              Frontier *frontier) {
  const InternalMmapVector<u32> &suppressed_stacks =
      GetSuppressionContext()->GetSortedSuppressedStacks();
  if (!suppressed_stacks.empty()) {
    ForEachChunk(IgnoredSuppressedCb,
                 const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
  }
  ForEachChunk(CollectIgnoredCb, frontier);
  ProcessGlobalRegions(frontier);
  ProcessThreads(suspended_threads, frontier);
  ProcessRootRegions(frontier);
  FloodFillTag(frontier, kReachable);

  CHECK_EQ(0, frontier->size());
  ProcessPC(frontier);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(frontier);
  FloodFillTag(frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    leak_report->AddLeakedChunk(chunk, m.stack_trace_id(), m.requested_size(),
                                m.tag());
  }
}

void LeakSuppressionContext::PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched;
  context.GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++) {
    Printf("%7zu %10zu %s\n",
           static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
           matched[i]->weight, matched[i]->templ);
  }
  Printf("%s\n\n", line);
}

static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
  const InternalMmapVector<tid_t> &suspended_threads =
      *(const InternalMmapVector<tid_t> *)arg;
  if (tctx->status == ThreadStatusRunning) {
    uptr i = InternalLowerBound(suspended_threads, tctx->os_id);
    if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
      Report(
          "Running thread %llu was not suspended. False leaks are possible.\n",
          tctx->os_id);
  }
}

#if SANITIZER_FUCHSIA

// Fuchsia provides a libc interface that guarantees all threads are
// covered, and SuspendedThreadList is never really used.
static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}

#else // !SANITIZER_FUCHSIA

static void ReportUnsuspendedThreads(
    const SuspendedThreadsList &suspended_threads) {
  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
    threads[i] = suspended_threads.GetThreadID(i);

  Sort(threads.data(), threads.size());

  GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      &ReportIfNotSuspended, &threads);
}

#endif // !SANITIZER_FUCHSIA

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ReportUnsuspendedThreads(suspended_threads);
  ClassifyAllChunks(suspended_threads, &param->frontier);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}
PrintResults(LeakReport & report)661 static bool PrintResults(LeakReport &report) {
662 uptr unsuppressed_count = report.UnsuppressedLeakCount();
663 if (unsuppressed_count) {
664 Decorator d;
665 Printf(
666 "\n"
667 "================================================================="
668 "\n");
669 Printf("%s", d.Error());
670 Report("ERROR: LeakSanitizer: detected memory leaks\n");
671 Printf("%s", d.Default());
672 report.ReportTopLeaks(flags()->max_leaks);
673 }
674 if (common_flags()->print_suppressions)
675 GetSuppressionContext()->PrintMatchedSuppressions();
676 if (unsuppressed_count > 0) {
677 report.PrintSummary();
678 return true;
679 }
680 return false;
681 }
682
CheckForLeaks()683 static bool CheckForLeaks() {
684 if (&__lsan_is_turned_off && __lsan_is_turned_off())
685 return false;
686 // Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
687 // suppressions. However if a stack id was previously suppressed, it should be
688 // suppressed in future checks as well.
689 for (int i = 0;; ++i) {
690 EnsureMainThreadIDIsCorrect();
691 CheckForLeaksParam param;
692 LockStuffAndStopTheWorld(CheckForLeaksCallback, ¶m);
693 if (!param.success) {
694 Report("LeakSanitizer has encountered a fatal error.\n");
695 Report(
696 "HINT: For debugging, try setting environment variable "
697 "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
698 Report(
699 "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
700 "etc)\n");
701 Die();
702 }
703 // No new suppressions stacks, so rerun will not help and we can report.
704 if (!param.leak_report.ApplySuppressions())
705 return PrintResults(param.leak_report);
706
707 // No indirect leaks to report, so we are done here.
708 if (!param.leak_report.IndirectUnsuppressedLeakCount())
709 return PrintResults(param.leak_report);
710
711 if (i >= 8) {
712 Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
713 return PrintResults(param.leak_report);
714 }
715
716 // We found a new previously unseen suppressed call stack. Rerun to make
717 // sure it does not hold indirect leaks.
718 VReport(1, "Rerun with %zu suppressed stacks.",
719 GetSuppressionContext()->GetSortedSuppressedStacks().size());
720 }
721 }
722
723 static bool has_reported_leaks = false;
HasReportedLeaks()724 bool HasReportedLeaks() { return has_reported_leaks; }
725
DoLeakCheck()726 void DoLeakCheck() {
727 Lock l(&global_mutex);
728 static bool already_done;
729 if (already_done) return;
730 already_done = true;
731 has_reported_leaks = CheckForLeaks();
732 if (has_reported_leaks) HandleLeaks();
733 }
734
DoRecoverableLeakCheck()735 static int DoRecoverableLeakCheck() {
736 Lock l(&global_mutex);
737 bool have_leaks = CheckForLeaks();
738 return have_leaks ? 1 : 0;
739 }
740
DoRecoverableLeakCheckVoid()741 void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
742
GetSuppressionForAddr(uptr addr)743 Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
744 Suppression *s = nullptr;
745
746 // Suppress by module name.
747 if (const char *module_name =
748 Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
749 if (context.Match(module_name, kSuppressionLeak, &s))
750 return s;
751
752 // Suppress by file or function name.
753 SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
754 for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
755 if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
756 context.Match(cur->info.file, kSuppressionLeak, &s)) {
757 break;
758 }
759 }
760 frames->ClearAll();
761 return s;
762 }
763
GetSuppressionForStack(u32 stack_trace_id,const StackTrace & stack)764 Suppression *LeakSuppressionContext::GetSuppressionForStack(
765 u32 stack_trace_id, const StackTrace &stack) {
766 LazyInit();
767 for (uptr i = 0; i < stack.size; i++) {
768 Suppression *s = GetSuppressionForAddr(
769 StackTrace::GetPreviousInstructionPc(stack.trace[i]));
770 if (s) {
771 suppressed_stacks_sorted = false;
772 suppressed_stacks.push_back(stack_trace_id);
773 return s;
774 }
775 }
776 return nullptr;
777 }
778
779 ///// LeakReport implementation. /////
780
781 // A hard limit on the number of distinct leaks, to avoid quadratic complexity
782 // in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
783 // in real-world applications.
784 // FIXME: Get rid of this limit by changing the implementation of LeakReport to
785 // use a hash table.
786 const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);

  if (u32 resolution = flags()->resolution) {
    StackTrace stack = StackDepotGet(stack_trace_id);
    stack.size = Min(stack.size, resolution);
    stack_trace_id = StackDepotPut(stack);
  }
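  // With resolution == 3, for instance, two allocation stacks that share their
  // top three frames are collapsed into a single Leak entry by the loop below.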

  bool is_directly_leaked = (tag == kDirectlyLeaked);
  uptr i;
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.Default());

  CHECK(leaks_[index].stack_trace_id);
  StackDepotGet(leaks_[index].stack_trace_id).Print();

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary;
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}

uptr LeakReport::ApplySuppressions() {
  LeakSuppressionContext *suppressions = GetSuppressionContext();
  uptr new_suppressions = false;
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = suppressions->GetSuppressionForStack(
        leaks_[i].stack_trace_id, StackDepotGet(leaks_[i].stack_trace_id));
    if (s) {
      s->weight += leaks_[i].total_size;
      atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
                                              leaks_[i].hit_count);
      leaks_[i].is_suppressed = true;
      ++new_suppressions;
    }
  }
  return new_suppressions;
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
  return result;
}

uptr LeakReport::IndirectUnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
      result++;
  return result;
}

} // namespace __lsan
#else // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() { }
void DoLeakCheck() { }
void DoRecoverableLeakCheckVoid() { }
void DisableInThisThread() { }
void EnableInThisThread() { }
}
#endif // CAN_SANITIZE_LEAKS

using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  Lock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
               "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  Lock l(&global_mutex);
  RootRegion region = {reinterpret_cast<uptr>(begin), size};
  root_regions.push_back(region);
  VReport(1, "Registered root region at %p of size %zu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  Lock l(&global_mutex);
  bool removed = false;
  for (uptr i = 0; i < root_regions.size(); i++) {
    RootRegion region = root_regions[i];
    if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
      removed = true;
      uptr last_index = root_regions.size() - 1;
      root_regions[i] = root_regions[last_index];
      root_regions.pop_back();
      VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %zu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif // CAN_SANITIZE_LEAKS
}
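
// Typical use of the two entry points above from user code (a sketch; `mapping`
// and `mapping_size` are placeholders for the caller's own range):
//   #include <sanitizer/lsan_interface.h>
//   __lsan_register_root_region(mapping, mapping_size);
//   ... store pointers to heap objects inside the region ...
//   __lsan_unregister_root_region(mapping, mapping_size);
// While registered, the region is scanned for pointers during leak checks (see
// ProcessRootRegions above), so heap objects referenced only from it are not
// reported as leaks.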

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif // CAN_SANITIZE_LEAKS
  return 0;
}

SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
  return "";
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions() {
  return "";
}
#endif
} // extern "C"