xref: /netbsd-src/external/gpl3/gcc.old/dist/libsanitizer/sanitizer_common/sanitizer_allocator_combined.h (revision 627f7eb200a4419d89b531d55fccd2ee3ffdcde0)
//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
11 #ifndef SANITIZER_ALLOCATOR_H
12 #error This file must be included inside sanitizer_allocator.h
13 #endif
14 
15 // This class implements a complete memory allocator by using two
16 // internal allocators:
17 // PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
18 //  When allocating 2^x bytes it should return 2^x aligned chunk.
19 // PrimaryAllocator is used via a local AllocatorCache.
20 // SecondaryAllocator can allocate anything, but is not efficient.
21 template <class PrimaryAllocator, class AllocatorCache,
22           class SecondaryAllocator>  // NOLINT
23 class CombinedAllocator {
24  public:
InitLinkerInitialized(s32 release_to_os_interval_ms)25   void InitLinkerInitialized(s32 release_to_os_interval_ms) {
26     primary_.Init(release_to_os_interval_ms);
27     secondary_.InitLinkerInitialized();
28     stats_.InitLinkerInitialized();
29   }
30 
Init(s32 release_to_os_interval_ms)31   void Init(s32 release_to_os_interval_ms) {
32     primary_.Init(release_to_os_interval_ms);
33     secondary_.Init();
34     stats_.Init();
35   }
36 
Allocate(AllocatorCache * cache,uptr size,uptr alignment)37   void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
38     // Returning 0 on malloc(0) may break a lot of code.
39     if (size == 0)
40       size = 1;
41     if (size + alignment < size) {
42       Report("WARNING: %s: CombinedAllocator allocation overflow: "
43              "0x%zx bytes with 0x%zx alignment requested\n",
44              SanitizerToolName, size, alignment);
45       return nullptr;
46     }
47     uptr original_size = size;
48     // If alignment requirements are to be fulfilled by the frontend allocator
49     // rather than by the primary or secondary, passing an alignment lower than
50     // or equal to 8 will prevent any further rounding up, as well as the later
51     // alignment check.
52     if (alignment > 8)
53       size = RoundUpTo(size, alignment);
54     // The primary allocator should return a 2^x aligned allocation when
55     // requested 2^x bytes, hence using the rounded up 'size' when being
56     // serviced by the primary (this is no longer true when the primary is
57     // using a non-fixed base address). The secondary takes care of the
58     // alignment without such requirement, and allocating 'size' would use
59     // extraneous memory, so we employ 'original_size'.
60     void *res;
61     if (primary_.CanAllocate(size, alignment))
62       res = cache->Allocate(&primary_, primary_.ClassID(size));
63     else
64       res = secondary_.Allocate(&stats_, original_size, alignment);
65     if (alignment > 8)
66       CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
67     return res;
68   }
69 
ReleaseToOSIntervalMs()70   s32 ReleaseToOSIntervalMs() const {
71     return primary_.ReleaseToOSIntervalMs();
72   }
73 
SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms)74   void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
75     primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
76   }
77 
ForceReleaseToOS()78   void ForceReleaseToOS() {
79     primary_.ForceReleaseToOS();
80   }
81 
Deallocate(AllocatorCache * cache,void * p)82   void Deallocate(AllocatorCache *cache, void *p) {
83     if (!p) return;
84     if (primary_.PointerIsMine(p))
85       cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
86     else
87       secondary_.Deallocate(&stats_, p);
88   }
89 
Reallocate(AllocatorCache * cache,void * p,uptr new_size,uptr alignment)90   void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
91                    uptr alignment) {
92     if (!p)
93       return Allocate(cache, new_size, alignment);
94     if (!new_size) {
95       Deallocate(cache, p);
96       return nullptr;
97     }
98     CHECK(PointerIsMine(p));
99     uptr old_size = GetActuallyAllocatedSize(p);
100     uptr memcpy_size = Min(new_size, old_size);
101     void *new_p = Allocate(cache, new_size, alignment);
102     if (new_p)
103       internal_memcpy(new_p, p, memcpy_size);
104     Deallocate(cache, p);
105     return new_p;
106   }
107 
PointerIsMine(void * p)108   bool PointerIsMine(void *p) {
109     if (primary_.PointerIsMine(p))
110       return true;
111     return secondary_.PointerIsMine(p);
112   }
113 
FromPrimary(void * p)114   bool FromPrimary(void *p) {
115     return primary_.PointerIsMine(p);
116   }
117 
GetMetaData(const void * p)118   void *GetMetaData(const void *p) {
119     if (primary_.PointerIsMine(p))
120       return primary_.GetMetaData(p);
121     return secondary_.GetMetaData(p);
122   }
123 
GetBlockBegin(const void * p)124   void *GetBlockBegin(const void *p) {
125     if (primary_.PointerIsMine(p))
126       return primary_.GetBlockBegin(p);
127     return secondary_.GetBlockBegin(p);
128   }
129 
130   // This function does the same as GetBlockBegin, but is much faster.
131   // Must be called with the allocator locked.
GetBlockBeginFastLocked(void * p)132   void *GetBlockBeginFastLocked(void *p) {
133     if (primary_.PointerIsMine(p))
134       return primary_.GetBlockBegin(p);
135     return secondary_.GetBlockBeginFastLocked(p);
136   }
137 
GetActuallyAllocatedSize(void * p)138   uptr GetActuallyAllocatedSize(void *p) {
139     if (primary_.PointerIsMine(p))
140       return primary_.GetActuallyAllocatedSize(p);
141     return secondary_.GetActuallyAllocatedSize(p);
142   }
143 
TotalMemoryUsed()144   uptr TotalMemoryUsed() {
145     return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
146   }
147 
TestOnlyUnmap()148   void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
149 
InitCache(AllocatorCache * cache)150   void InitCache(AllocatorCache *cache) {
151     cache->Init(&stats_);
152   }
153 
DestroyCache(AllocatorCache * cache)154   void DestroyCache(AllocatorCache *cache) {
155     cache->Destroy(&primary_, &stats_);
156   }
157 
SwallowCache(AllocatorCache * cache)158   void SwallowCache(AllocatorCache *cache) {
159     cache->Drain(&primary_);
160   }
161 
GetStats(AllocatorStatCounters s)162   void GetStats(AllocatorStatCounters s) const {
163     stats_.Get(s);
164   }
165 
PrintStats()166   void PrintStats() {
167     primary_.PrintStats();
168     secondary_.PrintStats();
169   }
170 
171   // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
172   // introspection API.
ForceLock()173   void ForceLock() {
174     primary_.ForceLock();
175     secondary_.ForceLock();
176   }
177 
ForceUnlock()178   void ForceUnlock() {
179     secondary_.ForceUnlock();
180     primary_.ForceUnlock();
181   }
182 
183   // Iterate over all existing chunks.
184   // The allocator must be locked when calling this function.
ForEachChunk(ForEachChunkCallback callback,void * arg)185   void ForEachChunk(ForEachChunkCallback callback, void *arg) {
186     primary_.ForEachChunk(callback, arg);
187     secondary_.ForEachChunk(callback, arg);
188   }
189 
190  private:
191   PrimaryAllocator primary_;
192   SecondaryAllocator secondary_;
193   AllocatorGlobalStats stats_;
194 };
195