1*38dfd33dSkamil //=-- lsan_common.h -------------------------------------------------------===//
2*38dfd33dSkamil //
3*38dfd33dSkamil // The LLVM Compiler Infrastructure
4*38dfd33dSkamil //
5*38dfd33dSkamil // This file is distributed under the University of Illinois Open Source
6*38dfd33dSkamil // License. See LICENSE.TXT for details.
7*38dfd33dSkamil //
8*38dfd33dSkamil //===----------------------------------------------------------------------===//
9*38dfd33dSkamil //
10*38dfd33dSkamil // This file is a part of LeakSanitizer.
11*38dfd33dSkamil // Private LSan header.
12*38dfd33dSkamil //
13*38dfd33dSkamil //===----------------------------------------------------------------------===//
14*38dfd33dSkamil
15*38dfd33dSkamil #ifndef LSAN_COMMON_H
16*38dfd33dSkamil #define LSAN_COMMON_H
17*38dfd33dSkamil
18*38dfd33dSkamil #include "sanitizer_common/sanitizer_allocator.h"
19*38dfd33dSkamil #include "sanitizer_common/sanitizer_common.h"
20*38dfd33dSkamil #include "sanitizer_common/sanitizer_internal_defs.h"
21*38dfd33dSkamil #include "sanitizer_common/sanitizer_platform.h"
22*38dfd33dSkamil #include "sanitizer_common/sanitizer_stoptheworld.h"
23*38dfd33dSkamil #include "sanitizer_common/sanitizer_symbolizer.h"
24*38dfd33dSkamil
25*38dfd33dSkamil // LeakSanitizer relies on some Glibc's internals (e.g. TLS machinery) thus
26*38dfd33dSkamil // supported for Linux only. Also, LSan doesn't like 32 bit architectures
27*38dfd33dSkamil // because of "small" (4 bytes) pointer size that leads to high false negative
28*38dfd33dSkamil // ratio on large leaks. But we still want to have it for some 32 bit arches
29*38dfd33dSkamil // (e.g. x86), see https://github.com/google/sanitizers/issues/403.
30*38dfd33dSkamil // To enable LeakSanitizer on a new architecture, one needs to implement the
31*38dfd33dSkamil // internal_clone function as well as (probably) adjust the TLS machinery for
32*38dfd33dSkamil // the new architecture inside the sanitizer library.
33*38dfd33dSkamil #if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
34*38dfd33dSkamil (SANITIZER_WORDSIZE == 64) && \
35*38dfd33dSkamil (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
36*38dfd33dSkamil defined(__powerpc64__))
37*38dfd33dSkamil #define CAN_SANITIZE_LEAKS 1
38*38dfd33dSkamil #elif defined(__i386__) && \
39*38dfd33dSkamil (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC)
40*38dfd33dSkamil #define CAN_SANITIZE_LEAKS 1
41*38dfd33dSkamil #elif defined(__arm__) && \
42*38dfd33dSkamil SANITIZER_LINUX && !SANITIZER_ANDROID
43*38dfd33dSkamil #define CAN_SANITIZE_LEAKS 1
44*38dfd33dSkamil #else
45*38dfd33dSkamil #define CAN_SANITIZE_LEAKS 0
46*38dfd33dSkamil #endif
47*38dfd33dSkamil
48*38dfd33dSkamil namespace __sanitizer {
49*38dfd33dSkamil class FlagParser;
50*38dfd33dSkamil class ThreadRegistry;
51*38dfd33dSkamil struct DTLS;
52*38dfd33dSkamil }
53*38dfd33dSkamil
54*38dfd33dSkamil namespace __lsan {
55*38dfd33dSkamil
// Chunk tags.
// Classification assigned to each heap chunk by the leak-detection scan.
enum ChunkTag {
  kDirectlyLeaked = 0,  // default: not found reachable from any root
  kIndirectlyLeaked = 1,  // reachable only through other leaked chunks
  kReachable = 2,  // reachable from a root (stack, globals, TLS, ...)
  kIgnored = 3  // excluded from reports via __lsan_ignore_object()
};
63*38dfd33dSkamil
64*38dfd33dSkamil const u32 kInvalidTid = (u32) -1;
65*38dfd33dSkamil
66*38dfd33dSkamil struct Flags {
67*38dfd33dSkamil #define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
68*38dfd33dSkamil #include "lsan_flags.inc"
69*38dfd33dSkamil #undef LSAN_FLAG
70*38dfd33dSkamil
71*38dfd33dSkamil void SetDefaults();
pointer_alignmentFlags72*38dfd33dSkamil uptr pointer_alignment() const {
73*38dfd33dSkamil return use_unaligned ? 1 : sizeof(uptr);
74*38dfd33dSkamil }
75*38dfd33dSkamil };
76*38dfd33dSkamil
extern Flags lsan_flags;
// Accessor for the global LSan flag set.
inline Flags *flags() { return &lsan_flags; }
// Registers all LSAN_FLAG entries of |f| with |parser|.
void RegisterLsanFlags(FlagParser *parser, Flags *f);
80*38dfd33dSkamil
// One aggregated leak record (see LeakReport below).
struct Leak {
  u32 id;                   // report-local id, assigned sequentially
  uptr hit_count;           // number of leaked chunks folded into this record
  uptr total_size;          // total leaked bytes across those chunks
  u32 stack_trace_id;       // id of the allocation stack trace
  bool is_directly_leaked;  // aggregated tag was kDirectlyLeaked
  bool is_suppressed;       // matched a suppression; hidden from output
};
89*38dfd33dSkamil
// A single leaked allocation, linked to its aggregated record via leak_id.
struct LeakedObject {
  u32 leak_id;  // matches Leak::id
  uptr addr;    // user-visible start address of the chunk
  uptr size;    // leaked size in bytes
};
95*38dfd33dSkamil
// Aggregates leaks by stack trace prefix.
class LeakReport {
 public:
  LeakReport() {}
  // Folds one leaked chunk of |leaked_size| bytes into the report,
  // aggregating by stack trace (see class comment).
  void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
                      ChunkTag tag);
  // Prints up to |max_leaks| leaks.
  void ReportTopLeaks(uptr max_leaks);
  void PrintSummary();
  // Marks leaks that match user-provided suppressions as suppressed.
  void ApplySuppressions();
  uptr UnsuppressedLeakCount();

 private:
  void PrintReportForLeak(uptr index);
  void PrintLeakedObjectsForLeak(uptr index);

  u32 next_id_ = 0;  // id to hand out to the next distinct leak
  InternalMmapVector<Leak> leaks_;
  InternalMmapVector<LeakedObject> leaked_objects_;
};
115*38dfd33dSkamil
116*38dfd33dSkamil typedef InternalMmapVector<uptr> Frontier;
117*38dfd33dSkamil
118*38dfd33dSkamil // Platform-specific functions.
119*38dfd33dSkamil void InitializePlatformSpecificModules();
120*38dfd33dSkamil void ProcessGlobalRegions(Frontier *frontier);
121*38dfd33dSkamil void ProcessPlatformSpecificAllocations(Frontier *frontier);
122*38dfd33dSkamil
// A [begin, begin + size) memory range treated as a root during pointer
// scanning (consumed by ScanRootRegion).
struct RootRegion {
  uptr begin;  // start address
  uptr size;   // length in bytes
};
127*38dfd33dSkamil
128*38dfd33dSkamil InternalMmapVector<RootRegion> const *GetRootRegions();
void ScanRootRegion(Frontier *frontier, RootRegion const &region,
                    uptr region_begin, uptr region_end, bool is_readable);
131*38dfd33dSkamil // Run stoptheworld while holding any platform-specific locks.
132*38dfd33dSkamil void DoStopTheWorld(StopTheWorldCallback callback, void* argument);
133*38dfd33dSkamil
134*38dfd33dSkamil void ScanRangeForPointers(uptr begin, uptr end,
135*38dfd33dSkamil Frontier *frontier,
136*38dfd33dSkamil const char *region_type, ChunkTag tag);
137*38dfd33dSkamil void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);
138*38dfd33dSkamil
// Result codes for IgnoreObjectLocked() (backing __lsan_ignore_object()).
enum IgnoreObjectResult {
  kIgnoreObjectSuccess,         // chunk found and newly ignored
  kIgnoreObjectAlreadyIgnored,  // chunk was already tagged kIgnored
  kIgnoreObjectInvalid          // address is not inside a live chunk
};
144*38dfd33dSkamil
145*38dfd33dSkamil // Functions called from the parent tool.
146*38dfd33dSkamil const char *MaybeCallLsanDefaultOptions();
147*38dfd33dSkamil void InitCommonLsan();
148*38dfd33dSkamil void DoLeakCheck();
149*38dfd33dSkamil void DoRecoverableLeakCheckVoid();
150*38dfd33dSkamil void DisableCounterUnderflow();
151*38dfd33dSkamil bool DisabledInThisThread();
152*38dfd33dSkamil
153*38dfd33dSkamil // Used to implement __lsan::ScopedDisabler.
154*38dfd33dSkamil void DisableInThisThread();
155*38dfd33dSkamil void EnableInThisThread();
// Can be used to ignore memory allocated by an intercepted
// function.
// RAII guard: calls DisableInThisThread() on construction and
// EnableInThisThread() on destruction.
struct ScopedInterceptorDisabler {
  ScopedInterceptorDisabler() { DisableInThisThread(); }
  ~ScopedInterceptorDisabler() { EnableInThisThread(); }
};
162*38dfd33dSkamil
163*38dfd33dSkamil // According to Itanium C++ ABI array cookie is a one word containing
164*38dfd33dSkamil // size of allocated array.
IsItaniumABIArrayCookie(uptr chunk_beg,uptr chunk_size,uptr addr)165*38dfd33dSkamil static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
166*38dfd33dSkamil uptr addr) {
167*38dfd33dSkamil return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
168*38dfd33dSkamil *reinterpret_cast<uptr *>(chunk_beg) == 0;
169*38dfd33dSkamil }
170*38dfd33dSkamil
171*38dfd33dSkamil // According to ARM C++ ABI array cookie consists of two words:
172*38dfd33dSkamil // struct array_cookie {
173*38dfd33dSkamil // std::size_t element_size; // element_size != 0
174*38dfd33dSkamil // std::size_t element_count;
175*38dfd33dSkamil // };
IsARMABIArrayCookie(uptr chunk_beg,uptr chunk_size,uptr addr)176*38dfd33dSkamil static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
177*38dfd33dSkamil uptr addr) {
178*38dfd33dSkamil return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
179*38dfd33dSkamil *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
180*38dfd33dSkamil }
181*38dfd33dSkamil
182*38dfd33dSkamil // Special case for "new T[0]" where T is a type with DTOR.
183*38dfd33dSkamil // new T[0] will allocate a cookie (one or two words) for the array size (0)
184*38dfd33dSkamil // and store a pointer to the end of allocated chunk. The actual cookie layout
185*38dfd33dSkamil // varies between platforms according to their C++ ABI implementation.
IsSpecialCaseOfOperatorNew0(uptr chunk_beg,uptr chunk_size,uptr addr)186*38dfd33dSkamil inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
187*38dfd33dSkamil uptr addr) {
188*38dfd33dSkamil #if defined(__arm__)
189*38dfd33dSkamil return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
190*38dfd33dSkamil #else
191*38dfd33dSkamil return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
192*38dfd33dSkamil #endif
193*38dfd33dSkamil }
194*38dfd33dSkamil
195*38dfd33dSkamil // The following must be implemented in the parent tool.
196*38dfd33dSkamil
197*38dfd33dSkamil void ForEachChunk(ForEachChunkCallback callback, void *arg);
198*38dfd33dSkamil // Returns the address range occupied by the global allocator object.
199*38dfd33dSkamil void GetAllocatorGlobalRange(uptr *begin, uptr *end);
200*38dfd33dSkamil // Wrappers for allocator's ForceLock()/ForceUnlock().
201*38dfd33dSkamil void LockAllocator();
202*38dfd33dSkamil void UnlockAllocator();
203*38dfd33dSkamil // Returns true if [addr, addr + sizeof(void *)) is poisoned.
204*38dfd33dSkamil bool WordIsPoisoned(uptr addr);
205*38dfd33dSkamil // Wrappers for ThreadRegistry access.
206*38dfd33dSkamil void LockThreadRegistry();
207*38dfd33dSkamil void UnlockThreadRegistry();
208*38dfd33dSkamil ThreadRegistry *GetThreadRegistryLocked();
209*38dfd33dSkamil bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
210*38dfd33dSkamil uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
211*38dfd33dSkamil uptr *cache_end, DTLS **dtls);
212*38dfd33dSkamil void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
213*38dfd33dSkamil void *arg);
214*38dfd33dSkamil // If called from the main thread, updates the main thread's TID in the thread
215*38dfd33dSkamil // registry. We need this to handle processes that fork() without a subsequent
216*38dfd33dSkamil // exec(), which invalidates the recorded TID. To update it, we must call
217*38dfd33dSkamil // gettid() from the main thread. Our solution is to call this function before
218*38dfd33dSkamil // leak checking and also before every call to pthread_create() (to handle cases
219*38dfd33dSkamil // where leak checking is initiated from a non-main thread).
220*38dfd33dSkamil void EnsureMainThreadIDIsCorrect();
221*38dfd33dSkamil // If p points into a chunk that has been allocated to the user, returns its
222*38dfd33dSkamil // user-visible address. Otherwise, returns 0.
223*38dfd33dSkamil uptr PointsIntoChunk(void *p);
224*38dfd33dSkamil // Returns address of user-visible chunk contained in this allocator chunk.
225*38dfd33dSkamil uptr GetUserBegin(uptr chunk);
226*38dfd33dSkamil // Helper for __lsan_ignore_object().
227*38dfd33dSkamil IgnoreObjectResult IgnoreObjectLocked(const void *p);
228*38dfd33dSkamil
229*38dfd33dSkamil // Return the linker module, if valid for the platform.
230*38dfd33dSkamil LoadedModule *GetLinker();
231*38dfd33dSkamil
232*38dfd33dSkamil // Return true if LSan has finished leak checking and reported leaks.
233*38dfd33dSkamil bool HasReportedLeaks();
234*38dfd33dSkamil
235*38dfd33dSkamil // Run platform-specific leak handlers.
236*38dfd33dSkamil void HandleLeaks();
237*38dfd33dSkamil
// Wrapper for chunk metadata operations.
// Implemented by the parent tool (e.g. ASan or standalone LSan).
class LsanMetadata {
 public:
  // Constructor accepts address of user-visible chunk.
  explicit LsanMetadata(uptr chunk);
  bool allocated() const;       // whether the chunk is currently allocated
  ChunkTag tag() const;         // current leak-classification tag
  void set_tag(ChunkTag value);
  uptr requested_size() const;  // size requested at allocation time
  u32 stack_trace_id() const;   // id of the allocation stack trace
 private:
  void *metadata_;  // opaque pointer into the parent tool's chunk metadata
};
251*38dfd33dSkamil
252*38dfd33dSkamil } // namespace __lsan
253*38dfd33dSkamil
254*38dfd33dSkamil extern "C" {
255*38dfd33dSkamil SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
256*38dfd33dSkamil const char *__lsan_default_options();
257*38dfd33dSkamil
258*38dfd33dSkamil SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
259*38dfd33dSkamil int __lsan_is_turned_off();
260*38dfd33dSkamil
261*38dfd33dSkamil SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
262*38dfd33dSkamil const char *__lsan_default_suppressions();
263*38dfd33dSkamil } // extern "C"
264*38dfd33dSkamil
265*38dfd33dSkamil #endif // LSAN_COMMON_H
266