//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between run-time libraries of sanitizers.
//
// It declares common functions and classes that are used in both runtimes.
// Implementations of some functions are provided in sanitizer_common, while
// others must be defined by the run-time library itself.
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_COMMON_H
#define SANITIZER_COMMON_H

#include "sanitizer_flags.h"
#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
#include "sanitizer_mutex.h"

#if defined(_MSC_VER) && !defined(__clang__)
extern "C" void _ReadWriteBarrier();
#pragma intrinsic(_ReadWriteBarrier)
#endif

namespace __sanitizer {

struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;

// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1 GiB

static const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external_ex() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.

extern atomic_uint32_t current_verbosity;
INLINE void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
INLINE int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}

uptr GetPageSize();
extern uptr PageSizeCached;
INLINE uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
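// Usage sketch (illustrative): callers typically round request sizes up to a
// whole number of pages with the cached value, e.g.
//
//   uptr rounded = RoundUpTo(requested_size, GetPageSizeCached());
//
// (RoundUpTo is declared in the Math section below.)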
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
uptr GetMaxUserVirtualAddress();
// Threads
tid_t GetTid();
int TgKill(pid_t pid, tid_t tid, int sig);
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);

// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
// Behaves just like MmapOrDie, but tolerates an out-of-memory condition and
// returns nullptr in that case.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
     WARN_UNUSED_RESULT;
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size);
// Behaves just like MmapFixedOrDie, but tolerates an out-of-memory condition
// and returns nullptr in that case.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Maps an aligned chunk of address space; size and alignment are powers of
// two. Dies on all errors except out of memory, in which case it returns
// nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
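// Sketch of the "...OnFatalError" contract (illustrative): the caller checks
// for nullptr and degrades gracefully instead of dying inside the primitive:
//
//   void *p = MmapOrDieOnFatalError(size, "example buffer");
//   if (!p)
//     return nullptr;  // e.g. let the caller report allocation failure
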
// Disallow access to a memory range.  Use MmapFixedNoAccess to allocate an
// inaccessible memory range.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);

void MprotectMallocZones(void *addr, int prot);

// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found, uptr *max_occupied_addr);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages entirely within the [beg, end] address range. Noop if
// the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
bool NoHugePagesInRegion(uptr addr, uptr length);
bool DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);

class ReservedAddressRange {
 public:
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  uptr Map(uptr fixed_addr, uptr size);
  uptr MapOrDie(uptr fixed_addr, uptr size);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void* base_;
  uptr size_;
  const char* name_;
  uptr os_handle_;
};

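// Usage sketch (illustrative; assumes Init() returns the base of the reserved
// region): reserve a range up front, then materialize pieces on demand.
//
//   ReservedAddressRange range;
//   uptr base = range.Init(kExampleRegionSize);   // kExampleRegionSize is
//                                                 // hypothetical
//   uptr page = range.MapOrDie(base, GetPageSizeCached());
//   range.Unmap(page, GetPageSizeCached());
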
typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/uptr *stats, uptr stats_size);

// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array containing
// |stats_size| elements.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);

// Simple low-level (mmap-based) allocator for internal use. It doesn't have
// a constructor, so all instances of LowLevelAllocator should be
// linker initialized.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);
 private:
  char *allocated_end_;
  char *allocated_current_;
};
// Set the min alignment of LowLevelAllocator to at least alignment.
void SetLowLevelAllocateMinAlignment(uptr alignment);
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Allows registering tool-specific callbacks for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);

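// Usage sketch (illustrative): with no constructor, instances are declared at
// global scope and zero-initialized by the linker; objects are then created
// via the placement operator new declared at the bottom of this header.
//
//   static LowLevelAllocator example_alloc;  // hypothetical instance
//   // ... with the caller's lock held:
//   LoadedModule *m = new (example_alloc) LoadedModule();
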
// IO
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
#define VReport(level, ...)                                \
  do {                                                     \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
  } while (0)
#define VPrintf(level, ...)                                \
  do {                                                     \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
  } while (0)

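// Usage sketch (illustrative): messages are gated on the runtime verbosity
// set via SetVerbosity(); at the default verbosity both lines are suppressed.
//
//   VReport(1, "%s: using existing shadow memory\n", SanitizerToolName);
//   VPrintf(2, "mapped %zu bytes\n", size);
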
// Locks sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock();
  ~ScopedErrorReportLock();

  static void CheckLocked();
};

extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

bool IsAccessibleMemoryRange(uptr beg, uptr size);

// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

// OS
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
void PrintModuleMap();
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);

u32 GetUid();
void ReExec();
void CheckASLR();
void CheckMPROTECT();
char **GetArgv();
char **GetEnviron();
void PrintCmdline();
bool StackSizeIsUnlimited();
uptr GetStackSizeLimitInBytes();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();

// Other
void SleepForSeconds(int seconds);
void SleepForMillis(int millis);
u64 NanoTime();
u64 MonotonicNanoTime();
int Atexit(void (*function)(void));
bool TemplateMatch(const char *templ, const char *str);

// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);

// Specific tools may override the behavior of the "Die" and "CheckFailed"
// functions to do a tool-specific job.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that will be run when "Die" is
// called. The callbacks are run in the opposite order of registration. Tools
// are strongly recommended to set up all callbacks during initialization,
// while there is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);

typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
                                        u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);

// The callback is called if soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true) or if RSS drops back below the limit
// (exceeded==false).
// The callback should be registered once at tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);

// Signal reporting.
// Each sanitizer uses a slightly different implementation of stack unwinding.
typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
                                              const void *callback_context,
                                              BufferedStackTrace *stack);
// Print a deadly signal report and die.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Part of HandleDeadlySignal, exposed for asan.
void StartReportDeadlySignal();
// Part of HandleDeadlySignal, exposed for asan.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

// We don't want the summary to be too long.
const int kMaxSummaryLength = 1024;
// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but construct error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing the top stack trace
// frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);

void ReportMmapWriteExec(int prot);

// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);  // NOLINT
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);  // NOLINT
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);  // NOLINT
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);  // NOLINT
#endif
}
#endif

INLINE uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}

INLINE uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}

INLINE bool IsPowerOfTwo(uptr x) {
  return (x & (x - 1)) == 0;
}

INLINE uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}

INLINE uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

INLINE uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

INLINE bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

INLINE uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}

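// Worked examples (illustrative), for x == 12 (binary 1100):
//   MostSignificantSetBitIndex(12) == 3, LeastSignificantSetBitIndex(12) == 2,
//   RoundUpToPowerOfTwo(12) == 16, RoundUpTo(12, 8) == 16,
//   RoundDownTo(12, 8) == 8, IsAligned(12, 4) == true, Log2(16) == 4.
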
// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.
template<class T> T Min(T a, T b) { return a < b ? a : b; }
template<class T> T Max(T a, T b) { return a > b ? a : b; }
template<class T> void Swap(T& a, T& b) {
  T tmp = a;
  a = b;
  b = tmp;
}

// Char handling
INLINE bool IsSpace(int c) {
  return (c == ' ') || (c == '\n') || (c == '\t') ||
         (c == '\f') || (c == '\r') || (c == '\v');
}
INLINE bool IsDigit(int c) {
  return (c >= '0') && (c <= '9');
}
INLINE int ToLower(int c) {
  return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
}

// A low-level vector based on mmap. May incur a significant memory overhead
// for small vectors.
// WARNING: The current implementation supports only POD types.
template<typename T>
class InternalMmapVectorNoCtor {
 public:
  void Initialize(uptr initial_capacity) {
    capacity_bytes_ = 0;
    size_ = 0;
    data_ = 0;
    reserve(initial_capacity);
  }
  void Destroy() { UnmapOrDie(data_, capacity_bytes_); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  void push_back(const T &element) {
    CHECK_LE(size_, capacity());
    if (size_ == capacity()) {
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize the internal buffer.
    if (new_size > capacity())
      Realloc(new_size);
  }
  void resize(uptr new_size) {
    if (new_size > size_) {
      reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }

  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }

  T *data_;
  uptr capacity_bytes_;
  uptr size_;
};

template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}

template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}

template<typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(1); }
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};

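// Usage sketch (illustrative): the RAII InternalMmapVector is the common
// choice; the NoCtor variant is for linker-initialized globals.
//
//   InternalMmapVector<uptr> pcs;   // mmap-backed, unmapped on destruction
//   pcs.push_back(pc);
//   for (uptr p : pcs) VPrintf(3, "pc: %zx\n", p);
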
class InternalScopedString : public InternalMmapVector<char> {
 public:
  explicit InternalScopedString(uptr max_length)
      : InternalMmapVector<char>(max_length), length_(0) {
    (*this)[0] = '\0';
  }
  uptr length() { return length_; }
  void clear() {
    (*this)[0] = '\0';
    length_ = 0;
  }
  void append(const char *format, ...);

 private:
  uptr length_;
};

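// Usage sketch (illustrative): a fixed-capacity printf-style buffer.
//
//   InternalScopedString str(kMaxPathLength);  // capacity chosen for a path
//   str.append("%s:%d", file, line);
//   Report("%s\n", str.data());
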
template <class T>
struct CompareLess {
  bool operator()(const T &a, const T &b) const { return a < b; }
};

// HeapSort for arrays and InternalMmapVector.
template <class T, class Compare = CompareLess<T>>
void Sort(T *v, uptr size, Compare comp = {}) {
  if (size < 2)
    return;
  // Stage 1: insert elements into the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;
      if (comp(v[p], v[j]))
        Swap(v[j], v[p]);
      else
        break;
    }
  }
  // Stage 2: swap the largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap(v[0], v[i]);
    uptr j, max_ind;
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp(v[max_ind], v[left]))
        max_ind = left;
      if (right < i && comp(v[max_ind], v[right]))
        max_ind = right;
      if (max_ind != j)
        Swap(v[j], v[max_ind]);
      else
        break;
    }
  }
}

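// Usage sketch (illustrative): ascending order by default, or with a custom
// comparator, e.g.
//
//   Sort(pcs.data(), pcs.size());                // ascending
//   Sort(pcs.data(), pcs.size(),
//        [](uptr a, uptr b) { return a > b; });  // descending
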
// Works like std::lower_bound: finds the first element that is not less
// than val.
template <class Container, class Value, class Compare>
uptr InternalLowerBound(const Container &v, uptr first, uptr last,
                        const Value &val, Compare comp) {
  while (last > first) {
    uptr mid = (first + last) / 2;
    if (comp(v[mid], val))
      first = mid + 1;
    else
      last = mid;
  }
  return first;
}

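// Usage sketch (illustrative): binary search over a sorted container; the
// result is the insertion position when val is absent.
//
//   uptr idx = InternalLowerBound(pcs, 0, pcs.size(), target,
//                                 CompareLess<uptr>());
//   bool found = idx < pcs.size() && pcs[idx] == target;
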
enum ModuleArch {
  kModuleArchUnknown,
  kModuleArchI386,
  kModuleArchX86_64,
  kModuleArchX86_64H,
  kModuleArchARMV6,
  kModuleArchARMV7,
  kModuleArchARMV7S,
  kModuleArchARMV7K,
  kModuleArchARM64
};

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if the file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff,
                      uptr max_len = 1 << 26, error_t *errno_p = nullptr);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector as it may reread
// the file multiple times to avoid mmap during read attempts. It's used to
// read procmaps, where short reads with mmap in between can produce
// inconsistent results.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if the file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = 1 << 26,
                      error_t *errno_p = nullptr);

// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cc.
inline const char *ModuleArchToString(ModuleArch arch) {
  switch (arch) {
    case kModuleArchUnknown:
      return "";
    case kModuleArchI386:
      return "i386";
    case kModuleArchX86_64:
      return "x86_64";
    case kModuleArchX86_64H:
      return "x86_64h";
    case kModuleArchARMV6:
      return "armv6";
    case kModuleArchARMV7:
      return "armv7";
    case kModuleArchARMV7S:
      return "armv7s";
    case kModuleArchARMV7K:
      return "armv7k";
    case kModuleArchARM64:
      return "arm64";
  }
  CHECK(0 && "Invalid module arch");
  return "";
}

690 const uptr kMaxSegName = 16;
691 
692 // Represents a binary loaded into virtual memory (e.g. this can be an
693 // executable or a shared object).
694 class LoadedModule {
695  public:
LoadedModule()696   LoadedModule()
697       : full_name_(nullptr),
698         base_address_(0),
699         max_executable_address_(0),
700         arch_(kModuleArchUnknown),
701         instrumented_(false) {
702     internal_memset(uuid_, 0, kModuleUUIDSize);
703     ranges_.clear();
704   }
705   void set(const char *module_name, uptr base_address);
706   void set(const char *module_name, uptr base_address, ModuleArch arch,
707            u8 uuid[kModuleUUIDSize], bool instrumented);
708   void clear();
709   void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
710                        const char *name = nullptr);
711   bool containsAddress(uptr address) const;
712 
full_name()713   const char *full_name() const { return full_name_; }
base_address()714   uptr base_address() const { return base_address_; }
max_executable_address()715   uptr max_executable_address() const { return max_executable_address_; }
arch()716   ModuleArch arch() const { return arch_; }
uuid()717   const u8 *uuid() const { return uuid_; }
instrumented()718   bool instrumented() const { return instrumented_; }
719 
720   struct AddressRange {
721     AddressRange *next;
722     uptr beg;
723     uptr end;
724     bool executable;
725     bool writable;
726     char name[kMaxSegName];
727 
AddressRangeAddressRange728     AddressRange(uptr beg, uptr end, bool executable, bool writable,
729                  const char *name)
730         : next(nullptr),
731           beg(beg),
732           end(end),
733           executable(executable),
734           writable(writable) {
735       internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
736     }
737   };
738 
ranges()739   const IntrusiveList<AddressRange> &ranges() const { return ranges_; }
740 
741  private:
742   char *full_name_;  // Owned.
743   uptr base_address_;
744   uptr max_executable_address_;
745   ModuleArch arch_;
746   u8 uuid_[kModuleUUIDSize];
747   bool instrumented_;
748   IntrusiveList<AddressRange> ranges_;
749 };
750 
751 // List of LoadedModules. OS-dependent implementation is responsible for
752 // filling this information.
753 class ListOfModules {
754  public:
ListOfModules()755   ListOfModules() : initialized(false) {}
~ListOfModules()756   ~ListOfModules() { clear(); }
757   void init();
758   void fallbackInit();  // Uses fallback init if available, otherwise clears
begin()759   const LoadedModule *begin() const { return modules_.begin(); }
begin()760   LoadedModule *begin() { return modules_.begin(); }
end()761   const LoadedModule *end() const { return modules_.end(); }
end()762   LoadedModule *end() { return modules_.end(); }
size()763   uptr size() const { return modules_.size(); }
764   const LoadedModule &operator[](uptr i) const {
765     CHECK_LT(i, modules_.size());
766     return modules_[i];
767   }
768 
769  private:
clear()770   void clear() {
771     for (auto &module : modules_) module.clear();
772     modules_.clear();
773   }
clearOrInit()774   void clearOrInit() {
775     initialized ? clear() : modules_.Initialize(kInitialCapacity);
776     initialized = true;
777   }
778 
779   InternalMmapVectorNoCtor<LoadedModule> modules_;
780   // We rarely have more than 16K loaded modules.
781   static const uptr kInitialCapacity = 1 << 14;
782   bool initialized;
783 };
784 
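// Usage sketch (illustrative): enumerate the modules mapped into the process.
//
//   ListOfModules modules;
//   modules.init();
//   for (const LoadedModule &m : modules)
//     VPrintf(2, "%s @ 0x%zx\n", m.full_name(), m.base_address());
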
// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};

void WriteToSyslog(const char *buffer);

#if SANITIZER_MAC
void LogFullErrorReport(const char *buffer);
#else
INLINE void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
INLINE void WriteOneLineToSyslog(const char *s) {}
INLINE void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
INLINE void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
INLINE void SetAbortMessage(const char *) {}
#endif

#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
INLINE void AndroidLogWrite(const char *buffer_unused) {}
INLINE void SanitizerInitializeUnwinder() {}
INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif

INLINE uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
  // Unused on Windows.
  return 0;
#endif
}

void *internal_start_thread(void(*func)(void*), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}

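// Usage sketch (illustrative): a hand-written clearing loop that must not be
// pattern-matched into a real memset call.
//
//   for (uptr i = 0; i < size; i++) {
//     p[i] = 0;
//     SanitizerBreakOptimization(p);  // defeat loop-idiom recognition
//   }
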
struct SignalContext {
  void *siginfo;
  void *context;
  uptr addr;
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor.
  SignalContext() = default;

  // Creates a signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal, e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if the signal is a stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform-specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
};

void InitializePlatformEarly();
void MaybeReexec();

template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
// auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}

// Linux on 64-bit s390 had a nasty bug that could crash the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do).  This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
INLINE void AvoidCVE_2016_2143() {}
#endif

struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

// The default value for the allocator_release_to_os_interval_ms common flag,
// indicating that the sanitizer allocator should not attempt to release
// memory to the OS.
const s32 kReleaseToOSIntervalNever = -1;

void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);

// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
INLINE u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}

}  // namespace __sanitizer

inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}

#endif  // SANITIZER_COMMON_H