//===-- memprof_allocator.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of MemProfiler, a memory profiler.
//
// Implementation of MemProf's memory allocator, which uses the allocator
// from sanitizer_common.
//
//===----------------------------------------------------------------------===//

#include "memprof_allocator.h"
#include "memprof_mapping.h"
#include "memprof_meminfoblock.h"
#include "memprof_mibmap.h"
#include "memprof_rawprofile.h"
#include "memprof_stack.h"
#include "memprof_thread.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_list.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_vector.h"

#include <sched.h>
#include <time.h>

namespace __memprof {

static int GetCpuId(void) {
  // _memprof_preinit is called via the preinit_array, which subsequently calls
  // malloc. Since this is before _dl_init calls VDSO_SETUP, sched_getcpu
  // will seg fault as the address of __vdso_getcpu will be null.
  if (!memprof_init_done)
    return -1;
  return sched_getcpu();
}

// Compute the timestamp in ms.
static int GetTimestamp(void) {
  // timespec_get will segfault if called from dl_init
  if (!memprof_timestamp_inited) {
    // By returning 0, this will be effectively treated as being
    // timestamped at memprof init time (when memprof_init_timestamp_s
    // is initialized).
    return 0;
  }
  timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);
  return (ts.tv_sec - memprof_init_timestamp_s) * 1000 + ts.tv_nsec / 1000000;
}

static MemprofAllocator &get_allocator();

// The memory chunk allocated from the underlying allocator looks like this:
// H H U U U U U U
//   H -- ChunkHeader (32 bytes)
//   U -- user memory.

// If there is left padding before the ChunkHeader (due to use of memalign),
// we store a magic value in the first uptr word of the memory block and
// store the address of ChunkHeader in the next uptr.
// M B L L L L L L L L L  H H U U U U U U
//   |                    ^
//   ---------------------|
//   M -- magic value kAllocBegMagic
//   B -- address of ChunkHeader pointing to the first 'H'

constexpr uptr kMaxAllowedMallocBits = 40;

// Should be no more than 32 bytes.
struct ChunkHeader {
  // 1-st 4 bytes.
  u32 alloc_context_id;
  // 2-nd 4 bytes
  u32 cpu_id;
  // 3-rd 4 bytes
  u32 timestamp_ms;
  // 4-th 4 bytes
  // Note only 1 bit is needed for this flag if we need space in the future for
  // more fields.
  u32 from_memalign;
  // 5-th and 6-th 4 bytes
  // The max size of an allocation is 2^40 (kMaxAllowedMallocSize), so this
  // could be shrunk to kMaxAllowedMallocBits if we need space in the future
  // for more fields.
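  // Stored atomically: Allocate publishes the requested size with a release
  // store, and Deallocate exchanges it back to zero, which also marks the
  // chunk as no longer live for GetMemprofChunk.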
  atomic_uint64_t user_requested_size;
  // 23 bits available
  // 7-th and 8-th 4 bytes
  u64 data_type_id; // TODO: hash of type name
};

static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
COMPILER_CHECK(kChunkHeaderSize == 32);

struct MemprofChunk : ChunkHeader {
  uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
  uptr UsedSize() {
    return atomic_load(&user_requested_size, memory_order_relaxed);
  }
  void *AllocBeg() {
    if (from_memalign)
      return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
    return reinterpret_cast<void *>(this);
  }
};

class LargeChunkHeader {
  static constexpr uptr kAllocBegMagic =
      FIRST_32_SECOND_64(0xCC6E96B9, 0xCC6E96B9CC6E96B9ULL);
  atomic_uintptr_t magic;
  MemprofChunk *chunk_header;

public:
  MemprofChunk *Get() const {
    return atomic_load(&magic, memory_order_acquire) == kAllocBegMagic
               ? chunk_header
               : nullptr;
  }

  void Set(MemprofChunk *p) {
    if (p) {
      chunk_header = p;
      atomic_store(&magic, kAllocBegMagic, memory_order_release);
      return;
    }

    uptr old = kAllocBegMagic;
    if (!atomic_compare_exchange_strong(&magic, &old, 0,
                                        memory_order_release)) {
      CHECK_EQ(old, kAllocBegMagic);
    }
  }
};

void FlushUnneededMemProfShadowMemory(uptr p, uptr size) {
  // Since memprof's mapping is compacting, the shadow chunk may not be
  // page-aligned, so we only flush the page-aligned portion.
  ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
}

void MemprofMapUnmapCallback::OnMap(uptr p, uptr size) const {
  // Statistics.
  MemprofStats &thread_stats = GetCurrentThreadStats();
  thread_stats.mmaps++;
  thread_stats.mmaped += size;
}
void MemprofMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
  // We are about to unmap a chunk of user memory.
  // Mark the corresponding shadow memory as not needed.
  FlushUnneededMemProfShadowMemory(p, size);
  // Statistics.
  MemprofStats &thread_stats = GetCurrentThreadStats();
  thread_stats.munmaps++;
  thread_stats.munmaped += size;
}

AllocatorCache *GetAllocatorCache(MemprofThreadLocalMallocStorage *ms) {
  CHECK(ms);
  return &ms->allocator_cache;
}

// Accumulates the access count from the shadow for the given pointer and size.
u64 GetShadowCount(uptr p, u32 size) {
  u64 *shadow = (u64 *)MEM_TO_SHADOW(p);
  u64 *shadow_end = (u64 *)MEM_TO_SHADOW(p + size);
  u64 count = 0;
  for (; shadow <= shadow_end; shadow++)
    count += *shadow;
  return count;
}

// Clears the shadow counters (when memory is allocated).
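// Small ranges are zeroed with memset; for large ranges, only the unaligned
// edges are memset and the page-aligned middle is re-mapped (which zeroes it).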
void ClearShadow(uptr addr, uptr size) {
  CHECK(AddrIsAlignedByGranularity(addr));
  CHECK(AddrIsInMem(addr));
  CHECK(AddrIsAlignedByGranularity(addr + size));
  CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY));
  CHECK(REAL(memset));
  uptr shadow_beg = MEM_TO_SHADOW(addr);
  uptr shadow_end = MEM_TO_SHADOW(addr + size - SHADOW_GRANULARITY) + 1;
  if (shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
    REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
  } else {
    uptr page_size = GetPageSizeCached();
    uptr page_beg = RoundUpTo(shadow_beg, page_size);
    uptr page_end = RoundDownTo(shadow_end, page_size);

    if (page_beg >= page_end) {
      REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
    } else {
      if (page_beg != shadow_beg) {
        REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
      }
      if (page_end != shadow_end) {
        REAL(memset)((void *)page_end, 0, shadow_end - page_end);
      }
      ReserveShadowMemoryRange(page_beg, page_end - 1, nullptr);
    }
  }
}

struct Allocator {
  static const uptr kMaxAllowedMallocSize = 1ULL << kMaxAllowedMallocBits;

  MemprofAllocator allocator;
  StaticSpinMutex fallback_mutex;
  AllocatorCache fallback_allocator_cache;

  uptr max_user_defined_malloc_size;
  atomic_uint8_t rss_limit_exceeded;

  // Holds the mapping of stack ids to MemInfoBlocks.
  MIBMapTy MIBMap;

  atomic_uint8_t destructing;
  atomic_uint8_t constructed;
  bool print_text;

  // ------------------- Initialization ------------------------
  explicit Allocator(LinkerInitialized) : print_text(flags()->print_text) {
    atomic_store_relaxed(&destructing, 0);
    atomic_store_relaxed(&constructed, 1);
  }

  ~Allocator() {
    atomic_store_relaxed(&destructing, 1);
    FinishAndWrite();
  }

  static void PrintCallback(const uptr Key, LockedMemInfoBlock *const &Value,
                            void *Arg) {
    SpinMutexLock l(&Value->mutex);
    Value->mib.Print(Key, bool(Arg));
  }

  void FinishAndWrite() {
    if (print_text && common_flags()->print_module_map)
      DumpProcessMap();

    allocator.ForceLock();

    InsertLiveBlocks();
    if (print_text) {
      if (!flags()->print_terse)
        Printf("Recorded MIBs (incl. live on exit):\n");
      MIBMap.ForEach(PrintCallback,
                     reinterpret_cast<void *>(flags()->print_terse));
      StackDepotPrintAll();
    } else {
      // Serialize the contents to a raw profile. Format documented in
      // memprof_rawprofile.h.
      char *Buffer = nullptr;

      MemoryMappingLayout Layout(/*cache_enabled=*/true);
      u64 BytesSerialized = SerializeToRawProfile(MIBMap, Layout, Buffer);
      CHECK(Buffer && BytesSerialized && "could not serialize to buffer");
      report_file.Write(Buffer, BytesSerialized);
    }

    allocator.ForceUnlock();
  }

  // Inserts any blocks which have been allocated but not yet deallocated.
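  // Called at profile-dump time with the allocator locked, so chunks that
  // were never freed still contribute a MemInfoBlock keyed by their
  // allocation stack id.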
  void InsertLiveBlocks() {
    allocator.ForEachChunk(
        [](uptr chunk, void *alloc) {
          u64 user_requested_size;
          Allocator *A = (Allocator *)alloc;
          MemprofChunk *m =
              A->GetMemprofChunk((void *)chunk, user_requested_size);
          if (!m)
            return;
          uptr user_beg = ((uptr)m) + kChunkHeaderSize;
          u64 c = GetShadowCount(user_beg, user_requested_size);
          long curtime = GetTimestamp();
          MemInfoBlock newMIB(user_requested_size, c, m->timestamp_ms, curtime,
                              m->cpu_id, GetCpuId());
          InsertOrMerge(m->alloc_context_id, newMIB, A->MIBMap);
        },
        this);
  }

  void InitLinkerInitialized() {
    SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
    allocator.InitLinkerInitialized(
        common_flags()->allocator_release_to_os_interval_ms);
    max_user_defined_malloc_size = common_flags()->max_allocation_size_mb
                                       ? common_flags()->max_allocation_size_mb
                                             << 20
                                       : kMaxAllowedMallocSize;
  }

  bool RssLimitExceeded() {
    return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
  }

  void SetRssLimitExceeded(bool limit_exceeded) {
    atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
  }

  // -------------------- Allocation/Deallocation routines ---------------
  void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
                 AllocType alloc_type) {
    if (UNLIKELY(!memprof_inited))
      MemprofInitFromRtl();
    if (RssLimitExceeded()) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportRssLimitExceeded(stack);
    }
    CHECK(stack);
    const uptr min_alignment = MEMPROF_ALIGNMENT;
    if (alignment < min_alignment)
      alignment = min_alignment;
    if (size == 0) {
      // We'd be happy to avoid allocating memory for zero-size requests, but
      // some programs/tests depend on this behavior and assume that malloc
      // would not return NULL even for zero-size allocations. Moreover, it
      // looks like operator new should never return NULL, and results of
      // consecutive "new" calls must be different even if the allocated size
      // is zero.
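      // Bumping the request to one byte keeps those guarantees while letting
      // the chunk be tracked like any other allocation.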
      size = 1;
    }
    CHECK(IsPowerOfTwo(alignment));
    uptr rounded_size = RoundUpTo(size, alignment);
    uptr needed_size = rounded_size + kChunkHeaderSize;
    if (alignment > min_alignment)
      needed_size += alignment;
    CHECK(IsAligned(needed_size, min_alignment));
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize ||
        size > max_user_defined_malloc_size) {
      if (AllocatorMayReturnNull()) {
        Report("WARNING: MemProfiler failed to allocate 0x%zx bytes\n", size);
        return nullptr;
      }
      uptr malloc_limit =
          Min(kMaxAllowedMallocSize, max_user_defined_malloc_size);
      ReportAllocationSizeTooBig(size, malloc_limit, stack);
    }

    MemprofThread *t = GetCurrentThread();
    void *allocated;
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocated = allocator.Allocate(cache, needed_size, 8);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocated = allocator.Allocate(cache, needed_size, 8);
    }
    if (UNLIKELY(!allocated)) {
      SetAllocatorOutOfMemory();
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportOutOfMemory(size, stack);
    }

    uptr alloc_beg = reinterpret_cast<uptr>(allocated);
    uptr alloc_end = alloc_beg + needed_size;
    uptr beg_plus_header = alloc_beg + kChunkHeaderSize;
    uptr user_beg = beg_plus_header;
    if (!IsAligned(user_beg, alignment))
      user_beg = RoundUpTo(user_beg, alignment);
    uptr user_end = user_beg + size;
    CHECK_LE(user_end, alloc_end);
    uptr chunk_beg = user_beg - kChunkHeaderSize;
    MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);
    m->from_memalign = alloc_beg != chunk_beg;
    CHECK(size);

    m->cpu_id = GetCpuId();
    m->timestamp_ms = GetTimestamp();
    m->alloc_context_id = StackDepotPut(*stack);

    uptr size_rounded_down_to_granularity =
        RoundDownTo(size, SHADOW_GRANULARITY);
    if (size_rounded_down_to_granularity)
      ClearShadow(user_beg, size_rounded_down_to_granularity);

    MemprofStats &thread_stats = GetCurrentThreadStats();
    thread_stats.mallocs++;
    thread_stats.malloced += size;
    thread_stats.malloced_overhead += needed_size - size;
    if (needed_size > SizeClassMap::kMaxSize)
      thread_stats.malloc_large++;
    else
      thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;

    void *res = reinterpret_cast<void *>(user_beg);
    atomic_store(&m->user_requested_size, size, memory_order_release);
    if (alloc_beg != chunk_beg) {
      CHECK_LE(alloc_beg + sizeof(LargeChunkHeader), chunk_beg);
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(m);
    }
    MEMPROF_MALLOC_HOOK(res, size);
    return res;
  }

  void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
                  BufferedStackTrace *stack, AllocType alloc_type) {
    uptr p = reinterpret_cast<uptr>(ptr);
    if (p == 0)
      return;

    MEMPROF_FREE_HOOK(ptr);

    uptr chunk_beg = p - kChunkHeaderSize;
    MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);

    u64 user_requested_size =
        atomic_exchange(&m->user_requested_size, 0, memory_order_acquire);
    if (memprof_inited && memprof_init_done &&
        atomic_load_relaxed(&constructed) &&
        !atomic_load_relaxed(&destructing)) {
      u64 c = GetShadowCount(p, user_requested_size);
      long curtime = GetTimestamp();

      MemInfoBlock newMIB(user_requested_size, c, m->timestamp_ms, curtime,
                          m->cpu_id, GetCpuId());
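      // Fold this chunk's access counts and lifetime into any existing entry
      // recorded for the same allocation call stack.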
      InsertOrMerge(m->alloc_context_id, newMIB, MIBMap);
    }

    MemprofStats &thread_stats = GetCurrentThreadStats();
    thread_stats.frees++;
    thread_stats.freed += user_requested_size;

    void *alloc_beg = m->AllocBeg();
    if (alloc_beg != m) {
      // Clear the magic value, as allocator internals may overwrite the
      // contents of deallocated chunk, confusing GetMemprofChunk lookup.
      reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Set(nullptr);
    }

    MemprofThread *t = GetCurrentThread();
    if (t) {
      AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
      allocator.Deallocate(cache, alloc_beg);
    } else {
      SpinMutexLock l(&fallback_mutex);
      AllocatorCache *cache = &fallback_allocator_cache;
      allocator.Deallocate(cache, alloc_beg);
    }
  }

  void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
    CHECK(old_ptr && new_size);
    uptr p = reinterpret_cast<uptr>(old_ptr);
    uptr chunk_beg = p - kChunkHeaderSize;
    MemprofChunk *m = reinterpret_cast<MemprofChunk *>(chunk_beg);

    MemprofStats &thread_stats = GetCurrentThreadStats();
    thread_stats.reallocs++;
    thread_stats.realloced += new_size;

    void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC);
    if (new_ptr) {
      CHECK_NE(REAL(memcpy), nullptr);
      uptr memcpy_size = Min(new_size, m->UsedSize());
      REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
      Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
    }
    return new_ptr;
  }

  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
      if (AllocatorMayReturnNull())
        return nullptr;
      ReportCallocOverflow(nmemb, size, stack);
    }
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC);
    // If the memory comes from the secondary allocator no need to clear it
    // as it comes directly from mmap.
    if (ptr && allocator.FromPrimary(ptr))
      REAL(memset)(ptr, 0, nmemb * size);
    return ptr;
  }

  void CommitBack(MemprofThreadLocalMallocStorage *ms,
                  BufferedStackTrace *stack) {
    AllocatorCache *ac = GetAllocatorCache(ms);
    allocator.SwallowCache(ac);
  }

  // -------------------------- Chunk lookup ----------------------

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  MemprofChunk *GetMemprofChunk(void *alloc_beg, u64 &user_requested_size) {
    if (!alloc_beg)
      return nullptr;
    MemprofChunk *p = reinterpret_cast<LargeChunkHeader *>(alloc_beg)->Get();
    if (!p) {
      if (!allocator.FromPrimary(alloc_beg))
        return nullptr;
      p = reinterpret_cast<MemprofChunk *>(alloc_beg);
    }
    // The size is reset to 0 on deallocation (and a min of 1 on
    // allocation).
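    // A zero size therefore means the chunk has already been freed (or was
    // never fully initialized), so it is not reported as a live chunk.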
    user_requested_size =
        atomic_load(&p->user_requested_size, memory_order_acquire);
    if (user_requested_size)
      return p;
    return nullptr;
  }

  MemprofChunk *GetMemprofChunkByAddr(uptr p, u64 &user_requested_size) {
    void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
    return GetMemprofChunk(alloc_beg, user_requested_size);
  }

  uptr AllocationSize(uptr p) {
    u64 user_requested_size;
    MemprofChunk *m = GetMemprofChunkByAddr(p, user_requested_size);
    if (!m)
      return 0;
    if (m->Beg() != p)
      return 0;
    return user_requested_size;
  }

  void Purge(BufferedStackTrace *stack) { allocator.ForceReleaseToOS(); }

  void PrintStats() { allocator.PrintStats(); }

  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
    allocator.ForceLock();
    fallback_mutex.Lock();
  }

  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
    fallback_mutex.Unlock();
    allocator.ForceUnlock();
  }
};

static Allocator instance(LINKER_INITIALIZED);

static MemprofAllocator &get_allocator() { return instance.allocator; }

void InitializeAllocator() { instance.InitLinkerInitialized(); }

void MemprofThreadLocalMallocStorage::CommitBack() {
  GET_STACK_TRACE_MALLOC;
  instance.CommitBack(this, &stack);
}

void PrintInternalAllocatorStats() { instance.PrintStats(); }

void memprof_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, 0, 0, stack, alloc_type);
}

void memprof_delete(void *ptr, uptr size, uptr alignment,
                    BufferedStackTrace *stack, AllocType alloc_type) {
  instance.Deallocate(ptr, size, alignment, stack, alloc_type);
}

void *memprof_malloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC));
}

void *memprof_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
}

void *memprof_reallocarray(void *p, uptr nmemb, uptr size,
                           BufferedStackTrace *stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportReallocArrayOverflow(nmemb, size, stack);
  }
  return memprof_realloc(p, nmemb * size, stack);
}

void *memprof_realloc(void *p, uptr size, BufferedStackTrace *stack) {
  if (!p)
    return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC));
  if (size == 0) {
    if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
      instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
      return nullptr;
    }
    // Allocate a size of 1 if we shouldn't free() on Realloc to 0
    size = 1;
  }
  return SetErrnoOnNull(instance.Reallocate(p, size, stack));
}

void *memprof_valloc(uptr size, BufferedStackTrace *stack) {
  return SetErrnoOnNull(
      instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC));
}

void *memprof_pvalloc(uptr size, BufferedStackTrace *stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
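  // Non-zero sizes are rounded up to a whole number of pages; PageSize is
  // also passed as the alignment below.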
  return SetErrnoOnNull(instance.Allocate(size, PageSize, stack, FROM_MALLOC));
}

void *memprof_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
                       AllocType alloc_type) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, stack);
  }
  return SetErrnoOnNull(instance.Allocate(size, alignment, stack, alloc_type));
}

void *memprof_aligned_alloc(uptr alignment, uptr size,
                            BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, stack);
  }
  return SetErrnoOnNull(instance.Allocate(size, alignment, stack, FROM_MALLOC));
}

int memprof_posix_memalign(void **memptr, uptr alignment, uptr size,
                           BufferedStackTrace *stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, stack);
  }
  void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

uptr memprof_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
  if (!ptr)
    return 0;
  uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
  return usable_size;
}

void MemprofSoftRssLimitExceededCallback(bool limit_exceeded) {
  instance.SetRssLimitExceeded(limit_exceeded);
}

} // namespace __memprof

// ---------------------- Interface ---------------- {{{1
using namespace __memprof;

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook, void *ptr,
                             uptr size) {
  (void)ptr;
  (void)size;
}

SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
  (void)ptr;
}
#endif

uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

int __sanitizer_get_ownership(const void *p) {
  return memprof_malloc_usable_size(p, 0, 0) != 0;
}

uptr __sanitizer_get_allocated_size(const void *p) {
  return memprof_malloc_usable_size(p, 0, 0);
}

int __memprof_profile_dump() {
  instance.FinishAndWrite();
  // In the future we may want to return non-zero if there are any errors
  // detected during the dumping process.
  return 0;
}