//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif

namespace __sanitizer {

struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;
struct SymbolizedStack;

// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1GiB

const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external_ex() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.

extern atomic_uint32_t current_verbosity;
inline void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
inline int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}

#if SANITIZER_ANDROID && !defined(__aarch64__)
// 32-bit Android only has 4k pages.
inline uptr GetPageSize() { return 4096; }
inline uptr GetPageSizeCached() { return 4096; }
#else
uptr GetPageSize();
extern uptr PageSizeCached;
inline uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
#endif

uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
uptr GetMaxUserVirtualAddress();
// Threads
tid_t GetTid();
int TgKill(pid_t pid, tid_t tid, int sig);
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_begin, uptr *stk_end,
                          uptr *tls_begin, uptr *tls_end);

// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);

inline void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size, bool raw_report = false);
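// Usage sketch for the pair above (illustrative only; `kBufSize` is a
// hypothetical constant):
//   void *buf = MmapOrDie(kBufSize, "MyToolScratchBuffer");
//   ...
//   UnmapOrDie(buf, kBufSize);
// MmapOrDie() either returns usable memory or calls Die(), so the result
// never needs a null check.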
// Behaves just like MmapOrDie, but tolerates an out-of-memory condition; in
// that case it returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
    WARN_UNUSED_RESULT;
bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size,
                             const char *name = nullptr) WARN_UNUSED_RESULT;
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
// Behaves just like MmapFixedOrDie, but tolerates an out-of-memory condition;
// in that case it returns nullptr.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
                                 const char *name = nullptr);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Map an aligned chunk of address space; size and alignment are powers of two.
// Dies on all but out-of-memory errors; in the latter case returns nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate
// inaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);
bool MprotectReadWrite(uptr addr, uptr size);

void MprotectMallocZones(void *addr, int prot);

#if SANITIZER_WINDOWS
// Zero previously mmap'd memory. Currently used only on Windows.
bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) WARN_UNUSED_RESULT;
#endif

#if SANITIZER_LINUX
// Unmap memory. Currently only used on Linux.
void UnmapFromTo(uptr from, uptr to);
#endif

// Maps shadow_size_bytes of shadow memory and returns the shadow address. It
// will be aligned to the mmap granularity * 2^shadow_scale, or to
// 2^min_shadow_base_alignment if that is larger. The returned address will
// have max(2^min_shadow_base_alignment, mmap granularity) on the left, and
// shadow_size_bytes bytes on the right, which on Linux is mapped no-access.
// high_mem_end may be updated if the original shadow size doesn't fit.
uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
                      uptr min_shadow_base_alignment, uptr &high_mem_end,
                      uptr granularity);

// Let S = max(shadow_size, num_aliases * alias_size, ring_buffer_size).
// Reserves 2*S bytes of address space to the right of the returned address and
// ring_buffer_size bytes to the left. The returned address is aligned to 2*S.
// Also creates num_aliases regions of accessible memory starting at offset S
// from the returned address. Each region has size alias_size and is backed by
// the same physical memory.
uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
                                uptr num_aliases, uptr ring_buffer_size);

// Reserve the memory range [beg, end]. If madvise_shadow is true then apply
// madvise (e.g. hugepages, core dumping) as requested by options.
void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
                              bool madvise_shadow = true);

// Protect size bytes of memory starting at addr. Also try to protect
// several pages at the start of the address space as specified by
// zero_base_shadow_start, at most up to size or zero_base_max_shadow_start.
void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
                uptr zero_base_max_shadow_start);
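// Worked example for MapDynamicShadow above (numbers illustrative): with a
// 4096-byte mmap granularity, shadow_scale == 3, and
// min_shadow_base_alignment == 0, the returned base is aligned to
// 4096 * 2^3 == 32768 bytes. If min_shadow_base_alignment were 20 instead,
// the base would be aligned to 2^20 bytes, since that is larger.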
// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found, uptr *max_occupied_addr);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages entirely within the [beg, end] address range. A no-op
// if the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
void SetShadowRegionHugePageMode(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(void *ptr, uptr size);
int RunFreeHooks(void *ptr);

class ReservedAddressRange {
 public:
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  uptr InitAligned(uptr size, uptr align, const char *name = nullptr);
  uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
  uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void *base_;
  uptr size_;
  const char *name_;
  uptr os_handle_;
};

typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/ uptr *stats);

// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array.
void GetMemoryProfile(fill_profile_f cb, uptr *stats);
void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
                            uptr smaps_len);

// Simple low-level (mmap-based) allocator for internal use. It doesn't have a
// constructor, so all instances of LowLevelAllocator should be
// linker-initialized.
//
// NOTE: Users should use the singleton provided via
// `GetGlobalLowLevelAllocator()` rather than create a new one. This reduces
// the number of mmap fragments by reusing the same contiguous mapping
// provided by the singleton.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);

 private:
  char *allocated_end_;
  char *allocated_current_;
};
// Set the min alignment of LowLevelAllocator to at least alignment.
void SetLowLevelAllocateMinAlignment(uptr alignment);
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Allows registering tool-specific callbacks for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);

LowLevelAllocator &GetGlobalLowLevelAllocator();

// IO
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...) FORMAT(1, 2);
void Report(const char *format, ...) FORMAT(1, 2);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                     \
  do {                                          \
    if (UNLIKELY((uptr)Verbosity() >= (level))) \
      Report(__VA_ARGS__);                      \
  } while (0)
#define VPrintf(level, ...)                     \
  do {                                          \
    if (UNLIKELY((uptr)Verbosity() >= (level))) \
      Printf(__VA_ARGS__);                      \
  } while (0)
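// Usage sketch for the verbosity macros above (message text illustrative;
// `shadow` is a hypothetical local):
//   VReport(1, "%s: initialized shadow at %p\n", SanitizerToolName, shadow);
// prints only when the runtime verbosity (set via SetVerbosity or the
// `verbosity` flag) is at least 1. VPrintf is identical but goes through
// Printf instead of Report.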
// Locks sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); }
  ~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); }

  static void Lock() SANITIZER_ACQUIRE(mutex_);
  static void Unlock() SANITIZER_RELEASE(mutex_);
  static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_);

 private:
  static atomic_uintptr_t reporting_thread_;
  static StaticSpinMutex mutex_;
};

extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

// Returns true if the entire range can be read.
bool IsAccessibleMemoryRange(uptr beg, uptr size);
// Attempts to copy `n` bytes from the memory range starting at `src` to
// `dest`. Returns true if the entire range can be read. Returns `false` if
// any part of the source range cannot be read, in which case the contents of
// `dest` are undefined.
bool TryMemCpy(void *dest, const void *src, uptr n);
// Copies accessible memory and zero-fills the inaccessible parts.
void MemCpyAccessible(void *dest, const void *src, uptr n);

// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

// OS
uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/ char *buf, uptr buf_len);
uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);

u32 GetUid();
void ReExec();
void CheckASLR();
void CheckMPROTECT();
char **GetArgv();
char **GetEnviron();
void PrintCmdline();
bool StackSizeIsUnlimited();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PlatformPrepareForSandboxing(void *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();

// Other
void WaitForDebugger(unsigned seconds, const char *label);
void SleepForSeconds(unsigned seconds);
void SleepForMillis(unsigned millis);
u64 NanoTime();
u64 MonotonicNanoTime();
int Atexit(void (*function)(void));
bool TemplateMatch(const char *templ, const char *str);

// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);
void NORETURN ReportMunmapFailureAndDie(void *ptr, uptr size, error_t err,
                                        bool raw_report = false);
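// Usage sketch for TryMemCpy (declared above); `addr` is a hypothetical,
// possibly-unmapped address:
//   u64 val;
//   if (TryMemCpy(&val, (const void *)addr, sizeof(val)))
//     Printf("read 0x%llx\n", val);
// Unlike a plain memcpy, this is designed not to fault on unreadable memory;
// it reports failure instead.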
// Returns true if the platform-specific error reported is an OOM error.
bool ErrorIsOOM(error_t err);

// This reports an error in the form:
//
//   `ERROR: {{SanitizerToolName}}: out of memory: {{err_msg}}`
//
// Downstream tools that read sanitizer output will know that errors starting
// in this format are specifically OOM errors.
#define ERROR_OOM(err_msg, ...) \
  Report("ERROR: %s: out of memory: " err_msg, SanitizerToolName, __VA_ARGS__)

// Specific tools may override the behavior of the "Die" function to do
// tool-specific work.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that will be run when "Die" is
// called. The callbacks are run in the opposite order of registration. Tools
// are strongly recommended to set up all callbacks during initialization,
// when there is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);

void SetCheckUnwindCallback(void (*callback)());

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);

// Signal reporting.
// Each sanitizer uses a slightly different implementation of stack unwinding.
typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
                                              const void *callback_context,
                                              BufferedStackTrace *stack);
// Print a deadly signal report and die.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Part of HandleDeadlySignal, exposed for asan.
void StartReportDeadlySignal();
// Part of HandleDeadlySignal, exposed for asan.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but constructs error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains the AddressInfo by symbolizing the top stack
// trace frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);
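// Usage sketch for the ERROR_OOM macro above (`size` is a hypothetical
// local):
//   ERROR_OOM("failed to allocate 0x%zx bytes\n", size);
// With SanitizerToolName == "AddressSanitizer" and size == 4096, this
// prints:
//   ERROR: AddressSanitizer: out of memory: failed to allocate 0x1000 bytes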
// Skips frames that we consider internal and not useful to the user.
const SymbolizedStack *SkipInternalFrames(const SymbolizedStack *frames);

void ReportMmapWriteExec(int prot, int mflags);

// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
#endif
}
#endif

inline uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}

inline uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}

inline constexpr bool IsPowerOfTwo(uptr x) { return (x & (x - 1)) == 0; }

inline uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}

inline constexpr uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

inline constexpr uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

inline constexpr bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

inline uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}

// Don't use std::min, std::max or std::swap, to minimize the dependency
// on libstdc++.
template <class T>
constexpr T Min(T a, T b) {
  return a < b ? a : b;
}
template <class T>
constexpr T Max(T a, T b) {
  return a > b ? a : b;
}
template <class T>
constexpr T Abs(T a) {
  return a < 0 ? -a : a;
}
template <class T>
void Swap(T &a, T &b) {
  T tmp = a;
  a = b;
  b = tmp;
}

// Char handling
inline bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
inline bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
inline int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}
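// Worked examples for the Round*/IsAligned helpers above (values follow
// directly from the definitions):
//   RoundUpTo(13, 8) == 16,  RoundDownTo(13, 8) == 8,  IsAligned(16, 8)
//   RoundUpToPowerOfTwo(5) == 8 (MostSignificantSetBitIndex(5) == 2, so the
//   result is 1 << 3)
// The boundary/alignment arguments must be powers of two, as checked above.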
// A low-level vector based on mmap. May incur significant memory overhead for
// small vectors.
// WARNING: The current implementation supports only POD types.
template <typename T, bool raw_report = false>
class InternalMmapVectorNoCtor {
 public:
  using value_type = T;
  void Initialize(uptr initial_capacity) {
    capacity_bytes_ = 0;
    size_ = 0;
    data_ = 0;
    reserve(initial_capacity);
  }
  void Destroy() { UnmapOrDie(data_, capacity_bytes_, raw_report); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    if (UNLIKELY(size_ >= capacity())) {
      CHECK_EQ(size_, capacity());
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize the internal buffer.
    if (new_size > capacity())
      Realloc(new_size);
  }
  void resize(uptr new_size) {
    if (new_size > size_) {
      reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }

  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  NOINLINE void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data =
        (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector", raw_report);
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_, raw_report);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }

  T *data_;
  uptr capacity_bytes_;
  uptr size_;
};

template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}

template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}

template <typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); }
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};
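// Usage sketch for InternalMmapVector (element values illustrative):
//   InternalMmapVector<uptr> addrs;
//   addrs.push_back(0x1000);
//   addrs.push_back(0x2000);
//   for (uptr a : addrs) VPrintf(2, "addr: %p\n", (void *)a);
// Storage is obtained with MmapOrDie and rounded up to whole pages (see
// Realloc above), so prefer it over heap containers inside the runtime.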
class InternalScopedString {
 public:
  InternalScopedString() : buffer_(1) { buffer_[0] = '\0'; }

  uptr length() const { return buffer_.size() - 1; }
  void clear() {
    buffer_.resize(1);
    buffer_[0] = '\0';
  }
  void Append(const char *str);
  void AppendF(const char *format, ...) FORMAT(2, 3);
  const char *data() const { return buffer_.data(); }
  char *data() { return buffer_.data(); }

 private:
  InternalMmapVector<char> buffer_;
};

template <class T>
struct CompareLess {
  bool operator()(const T &a, const T &b) const { return a < b; }
};

// HeapSort for arrays and InternalMmapVector.
template <class T, class Compare = CompareLess<T>>
void Sort(T *v, uptr size, Compare comp = {}) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp(v[p], v[j]))
        Swap(v[j], v[p]);
      else
        break;
    }
  }
  // Stage 2: swap the largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap(v[0], v[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp(v[max_ind], v[left]))
        max_ind = left;
      if (right < i && comp(v[max_ind], v[right]))
        max_ind = right;
      if (max_ind != j)
        Swap(v[j], v[max_ind]);
      else
        break;
    }
  }
}

// Works like std::lower_bound: finds the first element that is not less
// than val.
template <class Container, class T,
          class Compare = CompareLess<typename Container::value_type>>
uptr InternalLowerBound(const Container &v, const T &val, Compare comp = {}) {
  uptr first = 0;
  uptr last = v.size();
  while (last > first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else
      last = mid;
  }
  return first;
}

enum ModuleArch {
  kModuleArchUnknown,
  kModuleArchI386,
  kModuleArchX86_64,
  kModuleArchX86_64H,
  kModuleArchARMV6,
  kModuleArchARMV7,
  kModuleArchARMV7S,
  kModuleArchARMV7K,
  kModuleArchARM64,
  kModuleArchLoongArch64,
  kModuleArchRISCV64,
  kModuleArchHexagon
};

// Sorts and removes duplicates from the container.
template <class Container,
          class Compare = CompareLess<typename Container::value_type>>
void SortAndDedup(Container &v, Compare comp = {}) {
  Sort(v.data(), v.size(), comp);
  uptr size = v.size();
  if (size < 2)
    return;
  uptr last = 0;
  for (uptr i = 1; i < size; ++i) {
    if (comp(v[last], v[i])) {
      ++last;
      if (last != i)
        v[last] = v[i];
    } else {
      CHECK(!comp(v[i], v[last]));
    }
  }
  v.resize(last + 1);
}

constexpr uptr kDefaultFileMaxSize = FIRST_32_SECOND_64(1 << 26, 1 << 28);
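// Usage sketch for the algorithms above (element values illustrative):
//   InternalMmapVector<uptr> v;
//   v.push_back(30); v.push_back(10); v.push_back(30); v.push_back(20);
//   SortAndDedup(v);                              // v is now {10, 20, 30}
//   uptr idx = InternalLowerBound(v, (uptr)20);   // idx == 1
// Sort is an in-place heapsort, so it allocates nothing but is not stable.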
// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if the file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff,
                      uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector, as it may reread
// the file multiple times to avoid mmap during read attempts. It's used to
// read procmaps, where short reads with mmap in between can produce
// inconsistent results.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if the file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);

int GetModuleAndOffsetForPc(uptr pc, char *module_name, uptr module_name_len,
                            uptr *pc_offset);
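// Usage sketch for ReadFileToBuffer above (the path is illustrative):
//   char *data;
//   uptr buff_size, read_len;
//   if (ReadFileToBuffer("/proc/self/cmdline", &data, &buff_size, &read_len))
//     ... use read_len bytes of data, then UnmapOrDie(data, buff_size) ...
// Since the buffer is mmaped, it is presumably released with UnmapOrDie
// rather than a heap free.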
// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
    case kModuleArchLoongArch64:
      return "loongarch64";
    case kModuleArchRISCV64:
      return "riscv64";
    case kModuleArchHexagon:
      return "hexagon";
  }
  CHECK(0 && "Invalid module arch");
  return "";
}

#if SANITIZER_APPLE
const uptr kModuleUUIDSize = 16;
#else
const uptr kModuleUUIDSize = 32;
#endif
const uptr kMaxSegName = 16;

// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_address_(0),
        arch_(kModuleArchUnknown),
        uuid_size_(0),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void setUuid(const char *uuid, uptr size);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_address() const { return max_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  uptr uuid_size() const { return uuid_size_; }
  bool instrumented() const { return instrumented_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_address_;
  ModuleArch arch_;
  uptr uuid_size_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};

// List of LoadedModules. The OS-dependent implementation is responsible for
// filling in this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  ~ListOfModules() { clear(); }
  void init();
  void fallbackInit();  // Uses fallback init if available, otherwise clears.
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};
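// Usage sketch for ListOfModules (output format illustrative):
//   ListOfModules modules;
//   modules.init();
//   for (const LoadedModule &mod : modules)
//     VPrintf(2, "module %s loaded at %p\n", mod.full_name(),
//             (void *)mod.base_address());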
// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};

void WriteToSyslog(const char *buffer);

#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
#define SANITIZER_WIN_TRACE 1
#else
#define SANITIZER_WIN_TRACE 0
#endif

#if SANITIZER_APPLE || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
inline void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_APPLE
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
inline void WriteOneLineToSyslog(const char *s) {}
inline void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
inline void AndroidLogInit() {}
// FIXME: The macOS implementation could use CRSetCrashLogMessage.
inline void SetAbortMessage(const char *) {}
#endif

#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
inline void AndroidLogWrite(const char *buffer_unused) {}
inline void SanitizerInitializeUnwinder() {}
inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif

inline uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
  // Unused on Windows.
  return 0;
#endif
}

void *internal_start_thread(void *(*func)(void *), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc. to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}

struct SignalContext {
  void *siginfo;
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { Unknown, Read, Write } write_flag;

  // In some cases the kernel cannot provide the true faulting address; `addr`
  // will be zero then. This field allows distinguishing between those cases
  // and dereferences of null.
  bool is_true_faulting_addr;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor.
  SignalContext() = default;

  // Creates a signal context in a platform-specific manner.
  // SignalContext keeps pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()),
        is_true_faulting_addr(IsTrueFaultingAddress()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal, e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if the signal is a stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform-specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
  bool IsTrueFaultingAddress() const;
};
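// Usage sketch for SignalContext (handler shape follows SignalHandlerType
// above; the body is illustrative):
//   static void MyDeadlySignalHandler(int signo, void *siginfo, void *uc) {
//     SignalContext sig(siginfo, uc);
//     Report("%s at pc %p (addr %p)\n", sig.Describe(), (void *)sig.pc,
//            (void *)sig.addr);
//   }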
void InitializePlatformEarly();

template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
//   auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}

// Linux on 64-bit s390 had a nasty bug that crashes the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do). This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
inline void AvoidCVE_2016_2143() {}
#endif

struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

// The default value for the allocator_release_to_os_interval_ms common flag,
// indicating that the sanitizer allocator should not attempt to release
// memory to the OS.
const s32 kReleaseToOSIntervalNever = -1;

void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking, like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);

// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
inline u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}

}  // namespace __sanitizer

inline void *operator new(__sanitizer::usize size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}

#endif  // SANITIZER_COMMON_H