//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif

namespace __sanitizer {

struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;

// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1 GiB

static const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/Java/etc.
// For such PC values __tsan_symbolize_external_ex() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.

extern atomic_uint32_t current_verbosity;
INLINE void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
INLINE int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}

uptr GetPageSize();
extern uptr PageSizeCached;
INLINE uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
uptr GetMaxUserVirtualAddress();
// Threads
tid_t GetTid();
int TgKill(pid_t pid, tid_t tid, int sig);
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);

// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
// Behaves just like MmapOrDie, but tolerates the out-of-memory condition, in
// which case it returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
     WARN_UNUSED_RESULT;
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size);
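// Usage sketch (illustrative, not part of the interface): a page-sized
// internal buffer allocated and released with the helpers above.
//   uptr size = GetPageSizeCached();
//   void *mem = MmapOrDie(size, "example buffer");  // dies on failure
//   // ... use the buffer ...
//   UnmapOrDie(mem, size);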
// Behaves just like MmapFixedOrDie, but tolerates the out-of-memory condition,
// in which case it returns nullptr.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Map aligned chunk of address space; size and alignment are powers of two.
// Dies on all but out-of-memory errors; in the latter case returns nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate
// inaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);

void MprotectMallocZones(void *addr, int prot);

// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found, uptr *max_occupied_addr);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages entirely within the [beg, end] address range. No-op if
// the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
bool NoHugePagesInRegion(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);

class ReservedAddressRange {
 public:
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  uptr Map(uptr fixed_addr, uptr size);
  uptr MapOrDie(uptr fixed_addr, uptr size);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void* base_;
  uptr size_;
  const char* name_;
  uptr os_handle_;
};

typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/uptr *stats, uptr stats_size);

// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array containing
// |stats_size| elements.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);

// Simple low-level (mmap-based) allocator for internal use. Doesn't have a
// constructor, so all instances of LowLevelAllocator should be
// linker initialized.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);

 private:
  char *allocated_end_;
  char *allocated_current_;
};
// Set the min alignment of LowLevelAllocator to at least alignment.
void SetLowLevelAllocateMinAlignment(uptr alignment);
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Allows registering a tool-specific callback for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
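// Usage sketch (illustrative): LowLevelAllocator instances are typically
// globals, so they are zero-initialized by the linker; allocation goes through
// the placement operator new declared at the end of this header. The caller
// must provide its own locking. "ExampleType" is a hypothetical type.
//   static LowLevelAllocator example_allocator;  // linker-initialized
//   ExampleType *obj = new (example_allocator) ExampleType();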
// IO
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                                \
  do {                                                     \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
  } while (0)
#define VPrintf(level, ...)                                \
  do {                                                     \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
  } while (0)

// Locks sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock();
  ~ScopedErrorReportLock();

  static void CheckLocked();
};

extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

bool IsAccessibleMemoryRange(uptr beg, uptr size);

// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

// OS
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
void PrintModuleMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);

u32 GetUid();
void ReExec();
void CheckASLR();
char **GetArgv();
void PrintCmdline();
bool StackSizeIsUnlimited();
uptr GetStackSizeLimitInBytes();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();

// Other
void SleepForSeconds(int seconds);
void SleepForMillis(int millis);
u64 NanoTime();
u64 MonotonicNanoTime();
int Atexit(void (*function)(void));
bool TemplateMatch(const char *templ, const char *str);

// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);

// Specific tools may override the behavior of the "Die" and "CheckFailed"
// functions to do tool-specific work.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that would be run when "Die" is
// called. The callbacks are run in reverse order of registration. Tools are
// strongly recommended to set up all callbacks during initialization, when
// there is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);
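// Usage sketch (illustrative): a tool flushing its own state before the
// process dies. "FlushExampleLogs" is a hypothetical function.
//   static void FlushExampleLogs() { /* flush tool-specific logs */ }
//   ...
//   // During single-threaded initialization:
//   AddDieCallback(FlushExampleLogs);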
typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
                                        u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);

// The callback is called if soft_rss_limit_mb is given and either the limit
// was exceeded (exceeded==true) or RSS went back down below the limit
// (exceeded==false).
// The callback should be registered once at tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);

// Signal reporting.
// Each sanitizer uses a slightly different implementation of stack unwinding.
typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
                                              const void *callback_context,
                                              BufferedStackTrace *stack);
// Print deadly signal report and die.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Part of HandleDeadlySignal, exposed for asan.
void StartReportDeadlySignal();
// Part of HandleDeadlySignal, exposed for asan.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

// We don't want the summary to be too long.
const int kMaxSummaryLength = 1024;
// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but constructs error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing the top stack trace
// frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);

void ReportMmapWriteExec(int prot);
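// Usage sketch (illustrative): emitting a summary from an already-collected
// trace; "stack" stands in for a StackTrace the tool has gathered, and the
// error type string is an arbitrary example.
//   ReportErrorSummary("heap-buffer-overflow", &stack);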
// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);  // NOLINT
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);  // NOLINT
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);  // NOLINT
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);  // NOLINT
#endif
}
#endif

INLINE uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}

INLINE uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}

INLINE bool IsPowerOfTwo(uptr x) {
  return (x & (x - 1)) == 0;
}

INLINE uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}

INLINE uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

INLINE uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

INLINE bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

INLINE uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}

// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template<class T> T Min(T a, T b) { return a < b ? a : b; }
template<class T> T Max(T a, T b) { return a > b ? a : b; }
template<class T> void Swap(T& a, T& b) {
  T tmp = a;
  a = b;
  b = tmp;
}

// Char handling
INLINE bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
INLINE bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
INLINE int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}
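// Worked examples for the math helpers above (verifiable by hand):
//   MostSignificantSetBitIndex(12)  == 3   // 12 == 0b1100
//   LeastSignificantSetBitIndex(12) == 2
//   RoundUpToPowerOfTwo(12)         == 16
//   RoundUpTo(17, 16)               == 32  // (17 + 15) & ~15
//   RoundDownTo(17, 16)             == 16
//   Log2(16)                        == 4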
// A low-level vector based on mmap. May incur a significant memory overhead
// for small vectors.
// WARNING: The current implementation supports only POD types.
template<typename T>
class InternalMmapVectorNoCtor {
 public:
  void Initialize(uptr initial_capacity) {
    capacity_bytes_ = 0;
    size_ = 0;
    data_ = nullptr;
    reserve(initial_capacity);
  }
  void Destroy() { UnmapOrDie(data_, capacity_bytes_); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    CHECK_LE(size_, capacity());
    if (size_ == capacity()) {
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize internal buffer.
    if (new_size > capacity())
      Realloc(new_size);
  }
  void resize(uptr new_size) {
    if (new_size > size_) {
      reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }

  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }

  T *data_;
  uptr capacity_bytes_;
  uptr size_;
};

template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}

template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}

template<typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(1); }
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};
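// Usage sketch (illustrative): InternalMmapVector manages its own mmap-backed
// storage and is intended for POD element types only.
//   InternalMmapVector<uptr> addresses;
//   addresses.push_back(0x1000);
//   addresses.push_back(0x2000);
//   for (uptr addr : addresses) { /* ... */ }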
class InternalScopedString : public InternalMmapVector<char> {
 public:
  explicit InternalScopedString(uptr max_length)
      : InternalMmapVector<char>(max_length), length_(0) {
    (*this)[0] = '\0';
  }
  uptr length() { return length_; }
  void clear() {
    (*this)[0] = '\0';
    length_ = 0;
  }
  void append(const char *format, ...);

 private:
  uptr length_;
};

template <class T>
struct CompareLess {
  bool operator()(const T &a, const T &b) const { return a < b; }
};

// HeapSort for arrays and InternalMmapVector.
template <class T, class Compare = CompareLess<T>>
void Sort(T *v, uptr size, Compare comp = {}) {
  if (size < 2)
    return;
  // Stage 1: insert elements into the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp(v[p], v[j]))
        Swap(v[j], v[p]);
      else
        break;
    }
  }
  // Stage 2: swap the largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap(v[0], v[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp(v[max_ind], v[left]))
        max_ind = left;
      if (right < i && comp(v[max_ind], v[right]))
        max_ind = right;
      if (max_ind != j)
        Swap(v[j], v[max_ind]);
      else
        break;
    }
  }
}

// Works like std::lower_bound: finds the first element that is not less
// than val.
template <class Container, class Value, class Compare>
uptr InternalLowerBound(const Container &v, uptr first, uptr last,
                        const Value &val, Compare comp) {
  while (last > first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else
      last = mid;
  }
  return first;
}
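// Usage sketch (illustrative): sorting a vector and then binary-searching it
// with the helpers above; 42 is an arbitrary example value.
//   InternalMmapVector<uptr> v;
//   // ... fill v ...
//   Sort(v.data(), v.size());
//   uptr idx = InternalLowerBound(v, 0, v.size(), (uptr)42, CompareLess<uptr>());
//   bool found = idx < v.size() && v[idx] == 42;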
enum ModuleArch {
  kModuleArchUnknown,
  kModuleArchI386,
  kModuleArchX86_64,
  kModuleArchX86_64H,
  kModuleArchARMV6,
  kModuleArchARMV7,
  kModuleArchARMV7S,
  kModuleArchARMV7K,
  kModuleArchARM64
};

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if the file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff,
                      uptr max_len = 1 << 26, error_t *errno_p = nullptr);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector, as it may reread
// the file multiple times to avoid mmap during read attempts. It's used to
// read procmaps, where short reads with mmap in between can produce an
// inconsistent result.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if the file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = 1 << 26,
                      error_t *errno_p = nullptr);
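// Usage sketch (illustrative): since the comment above states that the buffer
// is mmaped, this sketch assumes it is released with UnmapOrDie using the
// returned region size.
//   char *buff;
//   uptr buff_size, read_len;
//   if (ReadFileToBuffer("/proc/self/maps", &buff, &buff_size, &read_len)) {
//     // ... use buff[0..read_len) ...
//     UnmapOrDie(buff, buff_size);
//   }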
// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cc.
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
  }
  CHECK(0 && "Invalid module arch");
  return "";
}

const uptr kModuleUUIDSize = 16;
const uptr kMaxSegName = 16;

// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_executable_address_(0),
        arch_(kModuleArchUnknown),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_executable_address() const { return max_executable_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  bool instrumented() const { return instrumented_; }

  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_executable_address_;
  ModuleArch arch_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};

// List of LoadedModules. The OS-dependent implementation is responsible for
// filling in this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  ~ListOfModules() { clear(); }
  void init();
  void fallbackInit();  // Uses fallback init if available, otherwise clears.
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};
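// Usage sketch (illustrative): enumerating loaded modules and checking
// whether an address belongs to one of them; "addr" is a hypothetical address.
//   ListOfModules modules;
//   modules.init();
//   for (const LoadedModule &mod : modules) {
//     if (mod.containsAddress(addr))
//       Printf("%p is in %s\n", (void *)addr, mod.full_name());
//   }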
// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};

void WriteToSyslog(const char *buffer);

#if SANITIZER_MAC
void LogFullErrorReport(const char *buffer);
#else
INLINE void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
INLINE void WriteOneLineToSyslog(const char *s) {}
INLINE void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
INLINE void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
INLINE void SetAbortMessage(const char *) {}
#endif

#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
INLINE void AndroidLogWrite(const char *buffer_unused) {}
INLINE void SanitizerInitializeUnwinder() {}
INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif

INLINE uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
  // Unused on Windows.
  return 0;
#endif
}

void *internal_start_thread(void (*func)(void *), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}

struct SignalContext {
  void *siginfo;
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor.
  SignalContext() = default;

  // Creates a signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal, e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if the signal is a stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform-specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
};

void MaybeReexec();

template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
//   auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}

// Linux on 64-bit s390 had a nasty bug that crashes the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do). This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
INLINE void AvoidCVE_2016_2143() {}
#endif

struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

// The default value for the allocator_release_to_os_interval_ms common flag;
// it indicates that the sanitizer allocator should not attempt to release
// memory to the OS.
const s32 kReleaseToOSIntervalNever = -1;

void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);

// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
INLINE u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}

}  // namespace __sanitizer

inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}

#endif  // SANITIZER_COMMON_H