//===-- guarded_pool_allocator.cpp ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "gwp_asan/guarded_pool_allocator.h"

#include "gwp_asan/options.h"

// RHEL creates the PRIu64 format macro (for printing uint64_t's) only when this
// macro is defined before including <inttypes.h>.
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif

#include <assert.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

using AllocationMetadata = gwp_asan::GuardedPoolAllocator::AllocationMetadata;
using Error = gwp_asan::GuardedPoolAllocator::Error;

namespace gwp_asan {
namespace {
// Pointer to the singleton version of this class. Instantiated during
// initialisation, this allows the signal handler to find this class in order
// to deduce the root cause of failures. Must not be referenced by users
// outside this translation unit, in order to avoid init-order-fiasco.
GuardedPoolAllocator *SingletonPtr = nullptr;

class ScopedBoolean {
public:
  ScopedBoolean(bool &B) : Bool(B) { Bool = true; }
  ~ScopedBoolean() { Bool = false; }

private:
  bool &Bool;
};

void defaultPrintStackTrace(uintptr_t *Trace, size_t TraceLength,
                            options::Printf_t Printf) {
  if (TraceLength == 0)
    Printf("  <unknown (does your allocator support backtracing?)>\n");

  for (size_t i = 0; i < TraceLength; ++i) {
    Printf("  #%zu 0x%zx in <unknown>\n", i, Trace[i]);
  }
  Printf("\n");
}
} // anonymous namespace

// Gets the singleton implementation of this class. Thread-compatible until
// init() is called, thread-safe afterwards.
GuardedPoolAllocator *GuardedPoolAllocator::getSingleton() {
  return SingletonPtr;
}

void GuardedPoolAllocator::AllocationMetadata::RecordAllocation(
    uintptr_t AllocAddr, size_t AllocSize, options::Backtrace_t Backtrace) {
  Addr = AllocAddr;
  Size = AllocSize;
  IsDeallocated = false;

  // TODO(hctim): Ask the caller to provide the thread ID, so we don't waste
  // other threads' time getting the thread ID under lock.
  AllocationTrace.ThreadID = getThreadID();
  AllocationTrace.TraceSize = 0;
  DeallocationTrace.TraceSize = 0;
  DeallocationTrace.ThreadID = kInvalidThreadID;

  if (Backtrace) {
    uintptr_t UncompressedBuffer[kMaxTraceLengthToCollect];
    size_t BacktraceLength =
        Backtrace(UncompressedBuffer, kMaxTraceLengthToCollect);
    AllocationTrace.TraceSize = compression::pack(
        UncompressedBuffer, BacktraceLength, AllocationTrace.CompressedTrace,
        kStackFrameStorageBytes);
  }
}

void GuardedPoolAllocator::AllocationMetadata::RecordDeallocation(
    options::Backtrace_t Backtrace) {
  IsDeallocated = true;
  // Ensure that the unwinder is not called if the recursive flag is set,
  // otherwise non-reentrant unwinders may deadlock.
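  // Record an empty deallocation trace by default; it is only filled in below
  // when the unwinder can be invoked safely.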
  DeallocationTrace.TraceSize = 0;
  if (Backtrace && !ThreadLocals.RecursiveGuard) {
    ScopedBoolean B(ThreadLocals.RecursiveGuard);

    uintptr_t UncompressedBuffer[kMaxTraceLengthToCollect];
    size_t BacktraceLength =
        Backtrace(UncompressedBuffer, kMaxTraceLengthToCollect);
    DeallocationTrace.TraceSize = compression::pack(
        UncompressedBuffer, BacktraceLength, DeallocationTrace.CompressedTrace,
        kStackFrameStorageBytes);
  }
  DeallocationTrace.ThreadID = getThreadID();
}

void GuardedPoolAllocator::init(const options::Options &Opts) {
  // Note: We return from init() here if GWP-ASan is not available. This will
  // stop heap-allocation of class members, as well as mmap() of the guarded
  // slots.
  if (!Opts.Enabled || Opts.SampleRate == 0 ||
      Opts.MaxSimultaneousAllocations == 0)
    return;

  if (Opts.SampleRate < 0) {
    Opts.Printf("GWP-ASan Error: SampleRate is < 0.\n");
    exit(EXIT_FAILURE);
  }

  if (Opts.SampleRate > INT32_MAX) {
    Opts.Printf("GWP-ASan Error: SampleRate is >= 2^31.\n");
    exit(EXIT_FAILURE);
  }

  if (Opts.MaxSimultaneousAllocations < 0) {
    Opts.Printf("GWP-ASan Error: MaxSimultaneousAllocations is < 0.\n");
    exit(EXIT_FAILURE);
  }

  SingletonPtr = this;

  MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations;

  PageSize = getPlatformPageSize();

  PerfectlyRightAlign = Opts.PerfectlyRightAlign;
  Printf = Opts.Printf;
  Backtrace = Opts.Backtrace;
  if (Opts.PrintBacktrace)
    PrintBacktrace = Opts.PrintBacktrace;
  else
    PrintBacktrace = defaultPrintStackTrace;

  size_t PoolBytesRequired =
      PageSize * (1 + MaxSimultaneousAllocations) +
      MaxSimultaneousAllocations * maximumAllocationSize();
  void *GuardedPoolMemory = mapMemory(PoolBytesRequired, kGwpAsanGuardPageName);

  size_t BytesRequired = MaxSimultaneousAllocations * sizeof(*Metadata);
  Metadata = reinterpret_cast<AllocationMetadata *>(
      mapMemory(BytesRequired, kGwpAsanMetadataName));
  markReadWrite(Metadata, BytesRequired, kGwpAsanMetadataName);

  // Allocate memory and set up the free pages queue.
  BytesRequired = MaxSimultaneousAllocations * sizeof(*FreeSlots);
  FreeSlots = reinterpret_cast<size_t *>(
      mapMemory(BytesRequired, kGwpAsanFreeSlotsName));
  markReadWrite(FreeSlots, BytesRequired, kGwpAsanFreeSlotsName);

  // Multiply the sample rate by 2 to give a good, fast approximation for (1 /
  // SampleRate) chance of sampling.
  if (Opts.SampleRate != 1)
    AdjustedSampleRatePlusOne = static_cast<uint32_t>(Opts.SampleRate) * 2 + 1;
  else
    AdjustedSampleRatePlusOne = 2;

  ThreadLocals.NextSampleCounter =
      (getRandomUnsigned32() % (AdjustedSampleRatePlusOne - 1)) + 1;

  GuardedPagePool = reinterpret_cast<uintptr_t>(GuardedPoolMemory);
  GuardedPagePoolEnd =
      reinterpret_cast<uintptr_t>(GuardedPoolMemory) + PoolBytesRequired;

  // Ensure that signal handlers are installed as late as possible, as the
  // class is not thread-safe until init() is finished, and thus a SIGSEGV may
  // cause a race to members if received during init().
  if (Opts.InstallSignalHandlers)
    installSignalHandlers();

  if (Opts.InstallForkHandlers)
    installAtFork();
}

void GuardedPoolAllocator::disable() { PoolMutex.lock(); }

void GuardedPoolAllocator::enable() { PoolMutex.unlock(); }

void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
                                   void *Arg) {
  uintptr_t Start = reinterpret_cast<uintptr_t>(Base);
  for (size_t i = 0; i < MaxSimultaneousAllocations; ++i) {
    const AllocationMetadata &Meta = Metadata[i];
    if (Meta.Addr && !Meta.IsDeallocated && Meta.Addr >= Start &&
        Meta.Addr < Start + Size)
      Cb(Meta.Addr, Meta.Size, Arg);
  }
}

void GuardedPoolAllocator::uninitTestOnly() {
  if (GuardedPagePool) {
    unmapMemory(reinterpret_cast<void *>(GuardedPagePool),
                GuardedPagePoolEnd - GuardedPagePool, kGwpAsanGuardPageName);
    GuardedPagePool = 0;
    GuardedPagePoolEnd = 0;
  }
  if (Metadata) {
    unmapMemory(Metadata, MaxSimultaneousAllocations * sizeof(*Metadata),
                kGwpAsanMetadataName);
    Metadata = nullptr;
  }
  if (FreeSlots) {
    unmapMemory(FreeSlots, MaxSimultaneousAllocations * sizeof(*FreeSlots),
                kGwpAsanFreeSlotsName);
    FreeSlots = nullptr;
  }
  uninstallSignalHandlers();
}

void *GuardedPoolAllocator::allocate(size_t Size) {
  // GuardedPagePoolEnd == 0 when GWP-ASan is disabled. If we are disabled, fall
  // back to the supporting allocator.
  if (GuardedPagePoolEnd == 0)
    return nullptr;

  // Protect against recursive allocations (e.g. from an unwinder or supporting
  // allocator that itself allocates while we are already on the stack).
  if (ThreadLocals.RecursiveGuard)
    return nullptr;
  ScopedBoolean SB(ThreadLocals.RecursiveGuard);

  if (Size == 0 || Size > maximumAllocationSize())
    return nullptr;

  size_t Index;
  {
    ScopedLock L(PoolMutex);
    Index = reserveSlot();
  }

  if (Index == kInvalidSlotID)
    return nullptr;

  uintptr_t Ptr = slotToAddr(Index);
  Ptr += allocationSlotOffset(Size);
  AllocationMetadata *Meta = addrToMetadata(Ptr);

  // If a slot is multiple pages in size, and the allocation takes up a single
  // page, we can improve overflow detection by leaving the unused pages as
  // unmapped.
  markReadWrite(reinterpret_cast<void *>(getPageAddr(Ptr)), Size,
                kGwpAsanAliveSlotName);

  Meta->RecordAllocation(Ptr, Size, Backtrace);

  return reinterpret_cast<void *>(Ptr);
}

void GuardedPoolAllocator::deallocate(void *Ptr) {
  assert(pointerIsMine(Ptr) && "Pointer is not mine!");
  uintptr_t UPtr = reinterpret_cast<uintptr_t>(Ptr);
  uintptr_t SlotStart = slotToAddr(addrToSlot(UPtr));
  AllocationMetadata *Meta = addrToMetadata(UPtr);
  if (Meta->Addr != UPtr) {
    reportError(UPtr, Error::INVALID_FREE);
    exit(EXIT_FAILURE);
  }

  // Intentionally scope the mutex here, so that other threads can access the
  // pool during the expensive markInaccessible() call.
  {
    ScopedLock L(PoolMutex);
    if (Meta->IsDeallocated) {
      reportError(UPtr, Error::DOUBLE_FREE);
      exit(EXIT_FAILURE);
    }

    // Ensure that the deallocation is recorded before marking the page as
    // inaccessible. Otherwise, a racy use-after-free will have inconsistent
    // metadata.
    Meta->RecordDeallocation(Backtrace);
  }

  markInaccessible(reinterpret_cast<void *>(SlotStart), maximumAllocationSize(),
                   kGwpAsanGuardPageName);

  // And finally, lock again to release the slot back into the pool.
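  // The slot's pages stay inaccessible until the slot is handed out again by
  // allocate(), which remaps only the bytes needed for the new allocation.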
  ScopedLock L(PoolMutex);
  freeSlot(addrToSlot(UPtr));
}

size_t GuardedPoolAllocator::getSize(const void *Ptr) {
  assert(pointerIsMine(Ptr));
  ScopedLock L(PoolMutex);
  AllocationMetadata *Meta = addrToMetadata(reinterpret_cast<uintptr_t>(Ptr));
  assert(Meta->Addr == reinterpret_cast<uintptr_t>(Ptr));
  return Meta->Size;
}

size_t GuardedPoolAllocator::maximumAllocationSize() const { return PageSize; }

AllocationMetadata *GuardedPoolAllocator::addrToMetadata(uintptr_t Ptr) const {
  return &Metadata[addrToSlot(Ptr)];
}

size_t GuardedPoolAllocator::addrToSlot(uintptr_t Ptr) const {
  assert(pointerIsMine(reinterpret_cast<void *>(Ptr)));
  size_t ByteOffsetFromPoolStart = Ptr - GuardedPagePool;
  return ByteOffsetFromPoolStart / (maximumAllocationSize() + PageSize);
}

uintptr_t GuardedPoolAllocator::slotToAddr(size_t N) const {
  return GuardedPagePool + (PageSize * (1 + N)) + (maximumAllocationSize() * N);
}

uintptr_t GuardedPoolAllocator::getPageAddr(uintptr_t Ptr) const {
  assert(pointerIsMine(reinterpret_cast<void *>(Ptr)));
  return Ptr & ~(static_cast<uintptr_t>(PageSize) - 1);
}

bool GuardedPoolAllocator::isGuardPage(uintptr_t Ptr) const {
  assert(pointerIsMine(reinterpret_cast<void *>(Ptr)));
  size_t PageOffsetFromPoolStart = (Ptr - GuardedPagePool) / PageSize;
  size_t PagesPerSlot = maximumAllocationSize() / PageSize;
  return (PageOffsetFromPoolStart % (PagesPerSlot + 1)) == 0;
}

size_t GuardedPoolAllocator::reserveSlot() {
  // Avoid potential reuse of a slot before we have made at least a single
  // allocation in each slot. Helps with our use-after-free detection.
  if (NumSampledAllocations < MaxSimultaneousAllocations)
    return NumSampledAllocations++;

  if (FreeSlotsLength == 0)
    return kInvalidSlotID;

  size_t ReservedIndex = getRandomUnsigned32() % FreeSlotsLength;
  size_t SlotIndex = FreeSlots[ReservedIndex];
  FreeSlots[ReservedIndex] = FreeSlots[--FreeSlotsLength];
  return SlotIndex;
}

void GuardedPoolAllocator::freeSlot(size_t SlotIndex) {
  assert(FreeSlotsLength < MaxSimultaneousAllocations);
  FreeSlots[FreeSlotsLength++] = SlotIndex;
}

uintptr_t GuardedPoolAllocator::allocationSlotOffset(size_t Size) const {
  assert(Size > 0);

  bool ShouldRightAlign = getRandomUnsigned32() % 2 == 0;
  if (!ShouldRightAlign)
    return 0;

  uintptr_t Offset = maximumAllocationSize();
  if (!PerfectlyRightAlign) {
    if (Size == 3)
      Size = 4;
    else if (Size > 4 && Size <= 8)
      Size = 8;
    else if (Size > 8 && (Size % 16) != 0)
      Size += 16 - (Size % 16);
  }
  Offset -= Size;
  return Offset;
}

void GuardedPoolAllocator::reportError(uintptr_t AccessPtr, Error E) {
  if (SingletonPtr)
    SingletonPtr->reportErrorInternal(AccessPtr, E);
}

size_t GuardedPoolAllocator::getNearestSlot(uintptr_t Ptr) const {
  if (Ptr <= GuardedPagePool + PageSize)
    return 0;
  if (Ptr > GuardedPagePoolEnd - PageSize)
    return MaxSimultaneousAllocations - 1;

  if (!isGuardPage(Ptr))
    return addrToSlot(Ptr);

  if (Ptr % PageSize <= PageSize / 2)
    return addrToSlot(Ptr - PageSize); // Round down.
  return addrToSlot(Ptr + PageSize);   // Round up.
}

Error GuardedPoolAllocator::diagnoseUnknownError(uintptr_t AccessPtr,
                                                 AllocationMetadata **Meta) {
  // Let's try and figure out what the source of this error is.
  if (isGuardPage(AccessPtr)) {
    size_t Slot = getNearestSlot(AccessPtr);
    AllocationMetadata *SlotMeta = addrToMetadata(slotToAddr(Slot));

    // Ensure that this slot was allocated once upon a time.
    if (!SlotMeta->Addr)
      return Error::UNKNOWN;
    *Meta = SlotMeta;

    if (SlotMeta->Addr < AccessPtr)
      return Error::BUFFER_OVERFLOW;
    return Error::BUFFER_UNDERFLOW;
  }

  // Access wasn't a guard page; check for use-after-free.
  AllocationMetadata *SlotMeta = addrToMetadata(AccessPtr);
  if (SlotMeta->IsDeallocated) {
    *Meta = SlotMeta;
    return Error::USE_AFTER_FREE;
  }

  // If we have reached here, the error is still unknown. There is no metadata
  // available.
  *Meta = nullptr;
  return Error::UNKNOWN;
}

namespace {
// Prints the provided error and metadata information.
void printErrorType(Error E, uintptr_t AccessPtr, AllocationMetadata *Meta,
                    options::Printf_t Printf, uint64_t ThreadID) {
  // Print using intermediate strings. Platforms like Android don't like when
  // you print multiple times to the same line, as there may be a newline
  // appended to a log file automatically per Printf() call.
  const char *ErrorString;
  switch (E) {
  case Error::UNKNOWN:
    ErrorString = "GWP-ASan couldn't automatically determine the source of "
                  "the memory error. It was likely caused by a wild memory "
                  "access into the GWP-ASan pool. The error occurred";
    break;
  case Error::USE_AFTER_FREE:
    ErrorString = "Use after free";
    break;
  case Error::DOUBLE_FREE:
    ErrorString = "Double free";
    break;
  case Error::INVALID_FREE:
    ErrorString = "Invalid (wild) free";
    break;
  case Error::BUFFER_OVERFLOW:
    ErrorString = "Buffer overflow";
    break;
  case Error::BUFFER_UNDERFLOW:
    ErrorString = "Buffer underflow";
    break;
  }

  constexpr size_t kDescriptionBufferLen = 128;
  // Zero-initialise so that the description is empty when there is no
  // metadata available.
  char DescriptionBuffer[kDescriptionBufferLen] = "";
  if (Meta) {
    if (E == Error::USE_AFTER_FREE) {
      snprintf(DescriptionBuffer, kDescriptionBufferLen,
               "(%zu byte%s into a %zu-byte allocation at 0x%zx)",
               AccessPtr - Meta->Addr, (AccessPtr - Meta->Addr == 1) ? "" : "s",
               Meta->Size, Meta->Addr);
    } else if (AccessPtr < Meta->Addr) {
      snprintf(DescriptionBuffer, kDescriptionBufferLen,
               "(%zu byte%s to the left of a %zu-byte allocation at 0x%zx)",
               Meta->Addr - AccessPtr, (Meta->Addr - AccessPtr == 1) ? "" : "s",
               Meta->Size, Meta->Addr);
    } else if (AccessPtr > Meta->Addr) {
      snprintf(DescriptionBuffer, kDescriptionBufferLen,
               "(%zu byte%s to the right of a %zu-byte allocation at 0x%zx)",
               AccessPtr - Meta->Addr, (AccessPtr - Meta->Addr == 1) ? "" : "s",
               Meta->Size, Meta->Addr);
    } else {
      snprintf(DescriptionBuffer, kDescriptionBufferLen,
               "(a %zu-byte allocation)", Meta->Size);
    }
  }

  // Possible number of digits of a 64-bit number: ceil(log10(2^64)) == 20. Add
  // a null terminator, and round to the nearest 8-byte boundary.
  constexpr size_t kThreadBufferLen = 24;
  char ThreadBuffer[kThreadBufferLen];
  if (ThreadID == GuardedPoolAllocator::kInvalidThreadID)
    snprintf(ThreadBuffer, kThreadBufferLen, "<unknown>");
  else
    snprintf(ThreadBuffer, kThreadBufferLen, "%" PRIu64, ThreadID);

  Printf("%s at 0x%zx %s by thread %s here:\n", ErrorString, AccessPtr,
         DescriptionBuffer, ThreadBuffer);
}

void printAllocDeallocTraces(uintptr_t AccessPtr, AllocationMetadata *Meta,
                             options::Printf_t Printf,
                             options::PrintBacktrace_t PrintBacktrace) {
  assert(Meta != nullptr && "Metadata is non-null for printAllocDeallocTraces");

  if (Meta->IsDeallocated) {
    if (Meta->DeallocationTrace.ThreadID ==
        GuardedPoolAllocator::kInvalidThreadID)
      Printf("0x%zx was deallocated by thread <unknown> here:\n", AccessPtr);
    else
      Printf("0x%zx was deallocated by thread %" PRIu64 " here:\n", AccessPtr,
             Meta->DeallocationTrace.ThreadID);

    uintptr_t UncompressedTrace[AllocationMetadata::kMaxTraceLengthToCollect];
    size_t UncompressedLength = compression::unpack(
        Meta->DeallocationTrace.CompressedTrace,
        Meta->DeallocationTrace.TraceSize, UncompressedTrace,
        AllocationMetadata::kMaxTraceLengthToCollect);

    PrintBacktrace(UncompressedTrace, UncompressedLength, Printf);
  }

  if (Meta->AllocationTrace.ThreadID == GuardedPoolAllocator::kInvalidThreadID)
    Printf("0x%zx was allocated by thread <unknown> here:\n", Meta->Addr);
  else
    Printf("0x%zx was allocated by thread %" PRIu64 " here:\n", Meta->Addr,
           Meta->AllocationTrace.ThreadID);

  uintptr_t UncompressedTrace[AllocationMetadata::kMaxTraceLengthToCollect];
  size_t UncompressedLength = compression::unpack(
      Meta->AllocationTrace.CompressedTrace, Meta->AllocationTrace.TraceSize,
      UncompressedTrace, AllocationMetadata::kMaxTraceLengthToCollect);

  PrintBacktrace(UncompressedTrace, UncompressedLength, Printf);
}

struct ScopedEndOfReportDecorator {
  ScopedEndOfReportDecorator(options::Printf_t Printf) : Printf(Printf) {}
  ~ScopedEndOfReportDecorator() { Printf("*** End GWP-ASan report ***\n"); }
  options::Printf_t Printf;
};
} // anonymous namespace

void GuardedPoolAllocator::reportErrorInternal(uintptr_t AccessPtr, Error E) {
  if (!pointerIsMine(reinterpret_cast<void *>(AccessPtr))) {
    return;
  }

  // Attempt to prevent races to re-use the same slot that triggered this
  // error. This does not guarantee that there are no races, because another
  // thread can take the locks during the time that the signal handler is
  // being called.
  PoolMutex.tryLock();
  ThreadLocals.RecursiveGuard = true;

  Printf("*** GWP-ASan detected a memory error ***\n");
  ScopedEndOfReportDecorator Decorator(Printf);

  AllocationMetadata *Meta = nullptr;

  if (E == Error::UNKNOWN) {
    E = diagnoseUnknownError(AccessPtr, &Meta);
  } else {
    size_t Slot = getNearestSlot(AccessPtr);
    Meta = addrToMetadata(slotToAddr(Slot));
    // Ensure that this slot has been previously allocated.
    if (!Meta->Addr)
      Meta = nullptr;
  }

  // Print the error information.
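  // Note that getThreadID() here reports the faulting (current) thread; the
  // allocating/deallocating thread IDs are printed from the metadata below.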
  uint64_t ThreadID = getThreadID();
  printErrorType(E, AccessPtr, Meta, Printf, ThreadID);
  if (Backtrace) {
    static constexpr unsigned kMaximumStackFramesForCrashTrace = 512;
    uintptr_t Trace[kMaximumStackFramesForCrashTrace];
    size_t TraceLength = Backtrace(Trace, kMaximumStackFramesForCrashTrace);

    PrintBacktrace(Trace, TraceLength, Printf);
  } else {
    Printf("  <unknown (does your allocator support backtracing?)>\n\n");
  }

  if (Meta)
    printAllocDeallocTraces(AccessPtr, Meta, Printf, PrintBacktrace);
}

GWP_ASAN_TLS_INITIAL_EXEC
GuardedPoolAllocator::ThreadLocalPackedVariables
    GuardedPoolAllocator::ThreadLocals;
} // namespace gwp_asan