//===-- memprof_allocator.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemProfiler, a memory profiler.
//
// Implementation of MemProf's memory allocator, which uses the allocator
// from sanitizer_common.
//
//===----------------------------------------------------------------------===//

#include "memprof_allocator.h"
#include "memprof_mapping.h"
#include "memprof_mibmap.h"
#include "memprof_rawprofile.h"
#include "memprof_stack.h"
#include "memprof_thread.h"
#include "profile/MemProfData.inc"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"

#include <sched.h>
#include <time.h>

namespace __memprof {
namespace {
using ::llvm::memprof::MemInfoBlock;

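// Prints a single MemInfoBlock, keyed by its allocation stack id. In terse
// mode everything is emitted on one '/'-separated line; otherwise a verbose
// multi-line report is printed. Averages are computed in fixed point with two
// decimal places (hence the scaling by 100).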
void Print(const MemInfoBlock &M, const u64 id, bool print_terse) {
  u64 p;

  if (print_terse) {
    p = M.TotalSize * 100 / M.AllocCount;
    Printf("MIB:%llu/%u/%llu.%02llu/%u/%u/", id, M.AllocCount, p / 100, p % 100,
           M.MinSize, M.MaxSize);
    p = M.TotalAccessCount * 100 / M.AllocCount;
    Printf("%llu.%02llu/%llu/%llu/", p / 100, p % 100, M.MinAccessCount,
           M.MaxAccessCount);
    p = M.TotalLifetime * 100 / M.AllocCount;
    Printf("%llu.%02llu/%u/%u/", p / 100, p % 100, M.MinLifetime,
           M.MaxLifetime);
    Printf("%u/%u/%u/%u\n", M.NumMigratedCpu, M.NumLifetimeOverlaps,
           M.NumSameAllocCpu, M.NumSameDeallocCpu);
  } else {
    p = M.TotalSize * 100 / M.AllocCount;
    Printf("Memory allocation stack id = %llu\n", id);
    Printf("\talloc_count %u, size (ave/min/max) %llu.%02llu / %u / %u\n",
           M.AllocCount, p / 100, p % 100, M.MinSize, M.MaxSize);
    p = M.TotalAccessCount * 100 / M.AllocCount;
    Printf("\taccess_count (ave/min/max): %llu.%02llu / %llu / %llu\n", p / 100,
           p % 100, M.MinAccessCount, M.MaxAccessCount);
    p = M.TotalLifetime * 100 / M.AllocCount;
    Printf("\tlifetime (ave/min/max): %llu.%02llu / %u / %u\n", p / 100,
           p % 100, M.MinLifetime, M.MaxLifetime);
    Printf("\tnum migrated: %u, num lifetime overlaps: %u, num same alloc "
           "cpu: %u, num same dealloc_cpu: %u\n",
           M.NumMigratedCpu, M.NumLifetimeOverlaps, M.NumSameAllocCpu,
           M.NumSameDeallocCpu);
  }
}
} // namespace

static int GetCpuId(void) {
  // _memprof_preinit is called via the preinit_array, which subsequently calls
  // malloc. Since this is before _dl_init calls VDSO_SETUP, sched_getcpu
  // will segfault as the address of __vdso_getcpu will be null.
  if (!memprof_init_done)
    return -1;
  return sched_getcpu();
}

// Compute the timestamp in ms.
static int GetTimestamp(void) {
  // timespec_get will segfault if called from dl_init
  if (!memprof_timestamp_inited) {
    // By returning 0, the allocation is effectively treated as being
    // timestamped at memprof init time (when memprof_init_timestamp_s
    // is initialized).
    return 0;
  }
  timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);
  return (ts.tv_sec - memprof_init_timestamp_s) * 1000 + ts.tv_nsec / 1000000;
}

static MemprofAllocator &get_allocator();

// The memory chunk allocated from the underlying allocator looks like this:
// H H U U U U U U
//   H -- ChunkHeader (32 bytes)
//   U -- user memory.

// If there is left padding before the ChunkHeader (due to use of memalign),
// we store a magic value in the first uptr word of the memory block and
// store the address of ChunkHeader in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'

constexpr uptr kMaxAllowedMallocBits = 40;

// Should be no more than 32 bytes.
struct ChunkHeader {
  // 1st 4 bytes.
  u32 alloc_context_id;
  // 2nd 4 bytes.
  u32 cpu_id;
  // 3rd 4 bytes.
  u32 timestamp_ms;
  // 4th 4 bytes.
  // Note only 1 bit is needed for this flag if we need space in the future for
  // more fields.
  u32 from_memalign;
  // 5th and 6th 4 bytes.
  // The max size of an allocation is 2^40 (kMaxAllowedMallocSize), so this
  // could be shrunk to kMaxAllowedMallocBits if we need space in the future
  // for more fields.
  atomic_uint64_t user_requested_size;
  // 23 bits available
  // 7th and 8th 4 bytes.
  u64 data_type_id; // TODO: hash of type name
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
COMPILER_CHECK(kChunkHeaderSize == 32);

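// Metadata view over an allocated chunk: the user region starts immediately
// after the ChunkHeader, and AllocBeg() recovers the start of the underlying
// allocator block (which differs from 'this' only for memalign'd chunks).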
struct MemprofChunk : ChunkHeader {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() {
    return atomic_load(&user_requested_size, memory_order_relaxed);
  }
  void *AllocBeg() {
    if (from_memalign)
      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
    return reinterpret_cast<void *>(this);
  }
};

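// Header placed at the start of the allocator block when left padding exists
// (see the 'M B' layout above): a magic word plus a pointer back to the real
// MemprofChunk. Get() returns null unless the magic value is present.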
class LargeChunkHeader {
  static constexpr uptr kAllocBegMagic =
      FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
  atomic_uintptr_t magic;
  MemprofChunk *chunk_header;

public:
  MemprofChunk *Get() const {
    return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
               ? chunk_header
               : nullptr;
  }

  void Set(MemprofChunk *p) {
    if (p) {
      chunk_header = p;
      atomic_store(&magic, kAllocBegMagic, memory_order_release);
      return;
    }

    uptr old = kAllocBegMagic;
    if (!atomic_compare_exchange_strong(&magic, &old, 0,
                                        memory_order_release)) {
      CHECK_EQ(old, kAllocBegMagic);
    }
  }
};

void FlushUnneededMemProfShadowMemory(uptr p, uptr size) {
  // Since memprof's mapping is compacting, the shadow chunk may not be
  // page-aligned, so we only flush the page-aligned portion.
  ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}

void MemprofMapUnmapCallback::OnMap(uptr p, uptr size) const {
  // Statistics.
  MemprofStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void MemprofMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededMemProfShadowMemory(p, size);
  // Statistics.
  MemprofStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

AllocatorCache *GetAllocatorCache(MemprofThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

// Accumulates the access count from the shadow for the given pointer and size.
u64 GetShadowCount(uptr p, u32 size) {
  u64 *shadow = (u64 *)MEM_TO_SHADOW(p);
  u64 *shadow_end = (u64 *)MEM_TO_SHADOW(p + size);
  u64 count = 0;
  for (; shadow <= shadow_end; shadow++)
    count += *shadow;
  return count;
}

// Clears the shadow counters (when memory is allocated).
void ClearShadow(uptr addr, uptr size) {
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));
  CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY));
  CHECK(REAL(memset));
  uptr shadow_beg = MEM_TO_SHADOW(addr);
  uptr shadow_end = MEM_TO_SHADOW(addr + size - SHADOW_GRANULARITY) + 1;
  if (shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
    REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
  } else {
    uptr page_size = GetPageSizeCached();
    uptr page_beg = RoundUpTo(shadow_beg, page_size);
    uptr page_end = RoundDownTo(shadow_end, page_size);

    if (page_beg >= page_end) {
      REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
    } else {
      if (page_beg != shadow_beg) {
        REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
      }
      if (page_end != shadow_end) {
        REAL(memset)((void *)page_end, 0, shadow_end - page_end);
      }
      ReserveShadowMemoryRange(page_beg, page_end - 1, nullptr);
    }
  }
}

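// The global MemProf allocator state: wraps the sanitizer_common allocator,
// owns the fallback cache used when no current thread is available, and
// aggregates per-allocation statistics into MIBMap keyed by allocation
// stack id.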
struct Allocator {
  static const uptr kMaxAllowedMallocSize = 1ULL << kMaxAllowedMallocBits;

  MemprofAllocator allocator;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;

  uptr max_user_defined_malloc_size;

  // Holds the mapping of stack ids to MemInfoBlocks.
  MIBMapTy MIBMap;

  atomic_uint8_t destructing;
  atomic_uint8_t constructed;
  bool print_text;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized) : print_text(flags()->print_text) {
    atomic_store_relaxed(&destructing, 0);
    atomic_store_relaxed(&constructed, 1);
  }

  ~Allocator() {
    atomic_store_relaxed(&destructing, 1);
    FinishAndWrite();
  }

  static void PrintCallback(const uptr Key, LockedMemInfoBlock *const &Value,
                            void *Arg) {
    SpinMutexLock l(&Value->mutex);
    Print(Value->mib, Key, bool(Arg));
  }

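  // Finalizes the profile: merges any still-live allocations into MIBMap and
  // then either prints the recorded MIBs as text (print_text) or serializes
  // them into the raw binary profile written to the report file.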
  void FinishAndWrite() {
    if (print_text && common_flags()->print_module_map)
      DumpProcessMap();

    allocator.ForceLock();

    InsertLiveBlocks();
    if (print_text) {
      if (!flags()->print_terse)
        Printf("Recorded MIBs (incl. live on exit):\n");
      MIBMap.ForEach(PrintCallback,
                     reinterpret_cast<void *>(flags()->print_terse));
      StackDepotPrintAll();
    } else {
      // Serialize the contents to a raw profile. Format documented in
      // memprof_rawprofile.h.
      char *Buffer = nullptr;

      MemoryMappingLayout Layout(/*cache_enabled=*/true);
      u64 BytesSerialized = SerializeToRawProfile(MIBMap, Layout, Buffer);
      CHECK(Buffer && BytesSerialized && "could not serialize to buffer");
      report_file.Write(Buffer, BytesSerialized);
    }

    allocator.ForceUnlock();
  }

  // Inserts any blocks which have been allocated but not yet deallocated.
  void InsertLiveBlocks() {
    allocator.ForEachChunk(
        [](uptr chunk, void *alloc) {
          u64 user_requested_size;
          Allocator *A = (Allocator *)alloc;
          MemprofChunk *m =
              A->GetMemprofChunk((void *)chunk, user_requested_size);
          if (!m)
            return;
          uptr user_beg = ((uptr)m) + kChunkHeaderSize;
          u64 c = GetShadowCount(user_beg, user_requested_size);
          long curtime = GetTimestamp();
          MemInfoBlock newMIB(user_requested_size, c, m->timestamp_ms, curtime,
                              m->cpu_id, GetCpuId());
          InsertOrMerge(m->alloc_context_id, newMIB, A->MIBMap);
        },
        this);
  }

  void InitLinkerInitialized() {
    SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
    allocator.InitLinkerInitialized(
        common_flags()->allocator_release_to_os_interval_ms);
    max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                       ? common_flags()->max_allocation_size_mb
                                             << 20
                                       : kMaxAllowedMallocSize;
  }

  // -------------------- Allocation/Deallocation routines ---------------
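  // Allocates a chunk with room for the ChunkHeader (plus extra padding when
  // alignment > MEMPROF_ALIGNMENT), records the cpu id, timestamp and
  // allocation stack id in the header, and clears the shadow counters for the
  // user region so access counting starts from zero.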
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type) {
    if (UNLIKELY(!memprof_inited))
      MemprofInitFromRtl();
    if (UNLIKELY(IsRssLimitExceeded())) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportRssLimitExceeded(stack);
    }
    CHECK(stack);
    const uptr min_alignment = MEMPROF_ALIGNMENT;
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rounded_size = RoundUpTo(size, alignment);
    uptr needed_size = rounded_size + kChunkHeaderSize;
    if (alignment > min_alignment)
      needed_size += alignment;
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
        size > max_user_defined_malloc_size) {
      if (AllocatorMayReturnNull()) {
        Report("WARNING: MemProfiler failed to allocate 0x%zx bytes\n", size);
        return nullptr;
      }
      uptr malloc_limit =
          Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
      ReportAllocationSizeTooBig(size, malloc_limit, stack);
    }

    MemprofThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (UNLIKELY(!allocated)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportOutOfMemory(size, stack);
    }

    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr beg_plus_header = alloc_beg + kChunkHeaderSize;
    uptr user_beg = beg_plus_header;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);
    m->from_memalign = alloc_beg != chunk_beg;
    CHECK(size);

    m->cpu_id = GetCpuId();
    m->timestamp_ms = GetTimestamp();
    m->alloc_context_id = StackDepotPut(*stack);

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, SHADOW_GRANULARITY);
    if (size_rounded_down_to_granularity)
      ClearShadow(user_beg, size_rounded_down_to_granularity);

    MemprofStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_overhead += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    atomic_store(&m->user_requested_size, size, memory_order_release);
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
    }
    RunMallocHooks(res, size);
    return res;
  }

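  // Deallocation: reads back the shadow access counters for the chunk, merges
  // the resulting MemInfoBlock into MIBMap (unless we are still initializing
  // or already destructing), then returns the block to the allocator.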
  void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                  BufferedStackTrace *stack, AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0)
      return;

    RunFreeHooks(ptr);

    uptr chunk_beg = p - kChunkHeaderSize;
    MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);

    u64 user_requested_size =
        atomic_exchange(&m->user_requested_size, 0, memory_order_acquire);
    if (memprof_inited && memprof_init_done &&
        atomic_load_relaxed(&constructed) &&
        !atomic_load_relaxed(&destructing)) {
      u64 c = GetShadowCount(p, user_requested_size);
      long curtime = GetTimestamp();

      MemInfoBlock newMIB(user_requested_size, c, m->timestamp_ms, curtime,
                          m->cpu_id, GetCpuId());
      InsertOrMerge(m->alloc_context_id, newMIB, MIBMap);
    }

    MemprofStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += user_requested_size;

    void *alloc_beg = m->AllocBeg();
    if (alloc_beg != m) {
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetMemprofChunk lookup.
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(nullptr);
    }

    MemprofThread *t = GetCurrentThread();
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocator.Deallocate(cache, alloc_beg);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocator.Deallocate(cache, alloc_beg);
    }
  }

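  // Reallocation is implemented as allocate + copy + deallocate: the new
  // chunk gets a fresh header, and min(new_size, old used size) bytes are
  // copied before the old chunk is released.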
  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);

    MemprofStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC);
    if (new_ptr) {
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }

  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportCallocOverflow(nmemb, size, stack);
    }
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC);
    // If the memory comes from the secondary allocator, there is no need to
    // clear it as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }

  void CommitBack(MemprofThreadLocalMallocStorage *ms,
                  BufferedStackTrace *stack) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    allocator.SwallowCache(ac);
  }

  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  MemprofChunk *GetMemprofChunk(void *alloc_beg, u64 &user_requested_size) {
    if (!alloc_beg)
      return nullptr;
    MemprofChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
    if (!p) {
      if (!allocator.FromPrimary(alloc_beg))
        return nullptr;
      p = reinterpret_cast<MemprofChunk *>(alloc_beg);
    }
    // The size is reset to 0 on deallocation (and a min of 1 on
    // allocation).
    user_requested_size =
        atomic_load(&p->user_requested_size, memory_order_acquire);
    if (user_requested_size)
      return p;
    return nullptr;
  }

  MemprofChunk *GetMemprofChunkByAddr(uptr p, u64 &user_requested_size) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetMemprofChunk(alloc_beg, user_requested_size);
  }

  uptr AllocationSize(uptr p) {
    u64 user_requested_size;
    MemprofChunk *m = GetMemprofChunkByAddr(p, user_requested_size);
    if (!m)
      return 0;
    if (m->Beg() != p)
      return 0;
    return user_requested_size;
  }

  void Purge(BufferedStackTrace *stack) { allocator.ForceReleaseToOS(); }

  void PrintStats() { allocator.PrintStats(); }

  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};

static Allocator instance(LINKER_INITIALIZED);

static MemprofAllocator &get_allocator() { return instance.allocator; }

void InitializeAllocator() { instance.InitLinkerInitialized(); }

void MemprofThreadLocalMallocStorage::CommitBack() {
  GET_STACK_TRACE_MALLOC;
  instance.CommitBack(this, &stack);
}

void PrintInternalAllocatorStats() { instance.PrintStats(); }

void memprof_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}

void memprof_delete(void *ptr, uptr size, uptr alignment,
                    BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}

void *memprof_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC));
}

void *memprof_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}

void *memprof_reallocarray(void *p, uptr nmemb, uptr size,
                           BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return memprof_realloc(p, nmemb * size, stack);
}

void *memprof_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on Realloc to 0
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}

void *memprof_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC));
}

void *memprof_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(instance.Allocate(size, PageSize, stack, FROM_MALLOC));
}

void *memprof_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                       AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(instance.Allocate(size, alignment, stack, alloc_type));
}

void *memprof_aligned_alloc(uptr alignment, uptr size,
                            BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(instance.Allocate(size, alignment, stack, FROM_MALLOC));
}

int memprof_posix_memalign(void **memptr, uptr alignment, uptr size,
                           BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr memprof_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr)
    return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  return usable_size;
}

} // namespace __memprof

// ---------------------- Interface ---------------- {{{1
using namespace __memprof;

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) {
  return memprof_malloc_usable_size(p, 0, 0) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return memprof_malloc_usable_size(p, 0, 0);
}

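// On-demand dump entry point: finalizes the accumulated profile and writes it
// out via Allocator::FinishAndWrite().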
int __memprof_profile_dump() {
  instance.FinishAndWrite();
  // In the future we may want to return non-zero if there are any errors
  // detected during the dumping process.
  return 0;
}