//===-- sanitizer_linux_libcdep.cc ----------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements linux-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"

#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD ||                \
    SANITIZER_OPENBSD || SANITIZER_SOLARIS

#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_flags.h"
#include "sanitizer_freebsd.h"
#include "sanitizer_getauxval.h"
#include "sanitizer_linux.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"

#include <dlfcn.h>  // for dlsym()
#include <link.h>
#include <pthread.h>
#include <signal.h>
#include <sys/resource.h>
#include <syslog.h>

#if SANITIZER_FREEBSD
#include <pthread_np.h>
#include <osreldate.h>
#include <sys/sysctl.h>
#define pthread_getattr_np pthread_attr_get_np
#endif

#if SANITIZER_OPENBSD
#include <pthread_np.h>
#include <sys/sysctl.h>
#endif

#if SANITIZER_NETBSD
#include <sys/sysctl.h>
#include <sys/tls.h>
// Fast LWP private pointer getters in ThreadSelfTlsTcb().
#include <machine/lwp_private.h>
#endif

#if SANITIZER_SOLARIS
#include <thread.h>
#endif

#if SANITIZER_ANDROID
#include <android/api-level.h>
#if !defined(CPU_COUNT) && !defined(__aarch64__)
#include <dirent.h>
#include <fcntl.h>
struct __sanitizer::linux_dirent {
  long           d_ino;
  off_t          d_off;
  unsigned short d_reclen;
  char           d_name[];
};
#endif
#endif

#if !SANITIZER_ANDROID
#include <elf.h>
#include <unistd.h>
#endif

namespace __sanitizer {

SANITIZER_WEAK_ATTRIBUTE int
real_sigaction(int signum, const void *act, void *oldact);

int internal_sigaction(int signum, const void *act, void *oldact) {
#if !SANITIZER_GO
  if (&real_sigaction)
    return real_sigaction(signum, act, oldact);
#endif
  return sigaction(signum, (const struct sigaction *)act,
                   (struct sigaction *)oldact);
}

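// Compute the stack bounds of the calling thread. At initialization time
// (the main thread, before libpthread is usable) the bounds are derived from
// the memory mapping containing a local variable, clipped to RLIMIT_STACK;
// for other threads they are queried from the threading library.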
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  CHECK(stack_top);
  CHECK(stack_bottom);
  if (at_initialization) {
    // This is the main thread. Libpthread may not be initialized yet.
    struct rlimit rl;
    CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);

    // Find the mapping that contains a stack variable.
    MemoryMappingLayout proc_maps(/*cache_enabled*/true);
    if (proc_maps.Error()) {
      *stack_top = *stack_bottom = 0;
      return;
    }
    MemoryMappedSegment segment;
    uptr prev_end = 0;
    while (proc_maps.Next(&segment)) {
      if ((uptr)&rl < segment.end) break;
      prev_end = segment.end;
    }
    CHECK((uptr)&rl >= segment.start && (uptr)&rl < segment.end);

    // Get stacksize from rlimit, but clip it so that it does not overlap
    // with other mappings.
    uptr stacksize = rl.rlim_cur;
    if (stacksize > segment.end - prev_end) stacksize = segment.end - prev_end;
    // When running with unlimited stack size, we still want to set some limit.
    // The unlimited stack size is caused by 'ulimit -s unlimited'.
    // Also, for some reason, GNU make spawns subprocesses with unlimited stack.
    if (stacksize > kMaxThreadStackSize)
      stacksize = kMaxThreadStackSize;
    *stack_top = segment.end;
    *stack_bottom = segment.end - stacksize;
    return;
  }
  uptr stacksize = 0;
  void *stackaddr = nullptr;
#if SANITIZER_SOLARIS
  stack_t ss;
  CHECK_EQ(thr_stksegment(&ss), 0);
  stacksize = ss.ss_size;
  stackaddr = (char *)ss.ss_sp - stacksize;
#elif SANITIZER_OPENBSD
  stack_t sattr;
  CHECK_EQ(pthread_stackseg_np(pthread_self(), &sattr), 0);
  stackaddr = sattr.ss_sp;
  stacksize = sattr.ss_size;
#else  // !SANITIZER_SOLARIS
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  my_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
  pthread_attr_destroy(&attr);
#endif // SANITIZER_SOLARIS

  *stack_top = (uptr)stackaddr + stacksize;
  *stack_bottom = (uptr)stackaddr;
}

#if !SANITIZER_GO
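// Set an environment variable through the next setenv() in the symbol lookup
// order (dlsym(RTLD_NEXT)), bypassing any interceptor. The function pointer is
// copied with memcpy to avoid a cast between object and function pointer
// types.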
bool SetEnv(const char *name, const char *value) {
  void *f = dlsym(RTLD_NEXT, "setenv");
  if (!f)
    return false;
  typedef int(*setenv_ft)(const char *name, const char *value, int overwrite);
  setenv_ft setenv_f;
  CHECK_EQ(sizeof(setenv_f), sizeof(f));
  internal_memcpy(&setenv_f, &f, sizeof(f));
  return setenv_f(name, value, 1) == 0;
}
#endif

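// Parse the glibc version out of confstr(_CS_GNU_LIBC_VERSION), which returns
// a string of the form "glibc 2.27". Returns false on non-glibc systems or if
// the string does not have the expected format.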
__attribute__((unused)) static bool GetLibcVersion(int *major, int *minor,
                                                   int *patch) {
#ifdef _CS_GNU_LIBC_VERSION
  char buf[64];
  uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf));
  if (len >= sizeof(buf))
    return false;
  buf[len] = 0;
  static const char kGLibC[] = "glibc ";
  if (internal_strncmp(buf, kGLibC, sizeof(kGLibC) - 1) != 0)
    return false;
  const char *p = buf + sizeof(kGLibC) - 1;
  *major = internal_simple_strtoll(p, &p, 10);
  *minor = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0;
  *patch = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0;
  return true;
#else
  return false;
#endif
}

#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO &&               \
    !SANITIZER_NETBSD && !SANITIZER_OPENBSD && !SANITIZER_SOLARIS
static uptr g_tls_size;

#ifdef __i386__
# ifndef __GLIBC_PREREQ
#  define CHECK_GET_TLS_STATIC_INFO_VERSION 1
# else
#  define CHECK_GET_TLS_STATIC_INFO_VERSION (!__GLIBC_PREREQ(2, 27))
# endif
#else
# define CHECK_GET_TLS_STATIC_INFO_VERSION 0
#endif

#if CHECK_GET_TLS_STATIC_INFO_VERSION
# define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
#else
# define DL_INTERNAL_FUNCTION
#endif

namespace {
struct GetTlsStaticInfoCall {
  typedef void (*get_tls_func)(size_t*, size_t*);
};
struct GetTlsStaticInfoRegparmCall {
  typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION;
};

template <typename T>
void CallGetTls(void* ptr, size_t* size, size_t* align) {
  typename T::get_tls_func get_tls;
  CHECK_EQ(sizeof(get_tls), sizeof(ptr));
  internal_memcpy(&get_tls, &ptr, sizeof(ptr));
  CHECK_NE(get_tls, 0);
  get_tls(size, align);
}

bool CmpLibcVersion(int major, int minor, int patch) {
  int ma;
  int mi;
  int pa;
  if (!GetLibcVersion(&ma, &mi, &pa))
    return false;
  if (ma > major)
    return true;
  if (ma < major)
    return false;
  if (mi > minor)
    return true;
  if (mi < minor)
    return false;
  return pa >= patch;
}

}  // namespace

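// Compute and cache the size of the static TLS area by calling glibc's
// private _dl_get_tls_static_info(), dispatching through the calling
// convention appropriate for the detected glibc version.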
void InitTlsSize() {
  // All currently supported platforms have 16-byte stack alignment.
  const size_t kStackAlign = 16;
  void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
  size_t tls_size = 0;
  size_t tls_align = 0;
  // On i?86, _dl_get_tls_static_info used to be internal_function, i.e.
  // __attribute__((regparm(3), stdcall)), before glibc 2.27, and is a normal
  // function in 2.27 and later.
  if (CHECK_GET_TLS_STATIC_INFO_VERSION && !CmpLibcVersion(2, 27, 0))
    CallGetTls<GetTlsStaticInfoRegparmCall>(get_tls_static_info_ptr,
                                            &tls_size, &tls_align);
  else
    CallGetTls<GetTlsStaticInfoCall>(get_tls_static_info_ptr,
                                     &tls_size, &tls_align);
  if (tls_align < kStackAlign)
    tls_align = kStackAlign;
  g_tls_size = RoundUpTo(tls_size, tls_align);
}
#else
void InitTlsSize() { }
#endif  // !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO &&
        // !SANITIZER_NETBSD && !SANITIZER_OPENBSD && !SANITIZER_SOLARIS

#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) ||          \
     defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) ||    \
     defined(__arm__)) &&                                                      \
    SANITIZER_LINUX && !SANITIZER_ANDROID
// sizeof(struct pthread) from glibc.
static atomic_uintptr_t thread_descriptor_size;

uptr ThreadDescriptorSize() {
  uptr val = atomic_load_relaxed(&thread_descriptor_size);
  if (val)
    return val;
#if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
  int major;
  int minor;
  int patch;
  if (GetLibcVersion(&major, &minor, &patch) && major == 2) {
    /* sizeof(struct pthread) values from various glibc versions.  */
    if (SANITIZER_X32)
      val = 1728; // Assume only one particular version for x32.
    // For ARM sizeof(struct pthread) changed in Glibc 2.23.
    else if (SANITIZER_ARM)
      val = minor <= 22 ? 1120 : 1216;
    else if (minor <= 3)
      val = FIRST_32_SECOND_64(1104, 1696);
    else if (minor == 4)
      val = FIRST_32_SECOND_64(1120, 1728);
    else if (minor == 5)
      val = FIRST_32_SECOND_64(1136, 1728);
    else if (minor <= 9)
      val = FIRST_32_SECOND_64(1136, 1712);
    else if (minor == 10)
      val = FIRST_32_SECOND_64(1168, 1776);
    else if (minor == 11 || (minor == 12 && patch == 1))
      val = FIRST_32_SECOND_64(1168, 2288);
    else if (minor <= 14)
      val = FIRST_32_SECOND_64(1168, 2304);
    else
      val = FIRST_32_SECOND_64(1216, 2304);
  }
#elif defined(__mips__)
  // TODO(sagarthakur): add more values as per different glibc versions.
  val = FIRST_32_SECOND_64(1152, 1776);
#elif defined(__aarch64__)
  // The sizeof (struct pthread) is the same from GLIBC 2.17 to 2.22.
  val = 1776;
#elif defined(__powerpc64__)
  val = 1776; // from glibc.ppc64le 2.20-8.fc21
#elif defined(__s390__)
  val = FIRST_32_SECOND_64(1152, 1776); // valid for glibc 2.22
#endif
  if (val)
    atomic_store_relaxed(&thread_descriptor_size, val);
  return val;
}

// The offset at which the pointer to self is located in the thread descriptor.
const uptr kThreadSelfOffset = FIRST_32_SECOND_64(8, 16);

uptr ThreadSelfOffset() {
  return kThreadSelfOffset;
}

#if defined(__mips__) || defined(__powerpc64__)
// TlsPreTcbSize includes size of struct pthread_descr and size of tcb
// head structure. It lies before the static TLS blocks.
static uptr TlsPreTcbSize() {
# if defined(__mips__)
  const uptr kTcbHead = 16; // sizeof (tcbhead_t)
# elif defined(__powerpc64__)
  const uptr kTcbHead = 88; // sizeof (tcbhead_t)
# endif
  const uptr kTlsAlign = 16;
  const uptr kTlsPreTcbSize =
      RoundUpTo(ThreadDescriptorSize() + kTcbHead, kTlsAlign);
  return kTlsPreTcbSize;
}
#endif

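// Return the address of the calling thread's glibc thread descriptor
// (struct pthread), derived from the architecture-specific thread pointer.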
uptr ThreadSelf() {
  uptr descr_addr;
# if defined(__i386__)
  asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
# elif defined(__x86_64__)
  asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
# elif defined(__mips__)
  // MIPS uses TLS variant I. The thread pointer (in hardware register $29)
  // points to the end of the TCB + 0x7000. The pthread_descr structure is
  // immediately in front of the TCB. TlsPreTcbSize() includes the size of the
  // TCB and the size of pthread_descr.
  const uptr kTlsTcbOffset = 0x7000;
  uptr thread_pointer;
  asm volatile(".set push;\
                .set mips64r2;\
                rdhwr %0,$29;\
                .set pop" : "=r" (thread_pointer));
  descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize();
# elif defined(__aarch64__) || defined(__arm__)
  descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
                                      ThreadDescriptorSize();
# elif defined(__s390__)
  descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer());
# elif defined(__powerpc64__)
  // PPC64LE uses TLS variant I. The thread pointer (in GPR 13)
  // points to the end of the TCB + 0x7000. The pthread_descr structure is
  // immediately in front of the TCB. TlsPreTcbSize() includes the size of the
  // TCB and the size of pthread_descr.
  const uptr kTlsTcbOffset = 0x7000;
  uptr thread_pointer;
  asm("addi %0,13,%1" : "=r"(thread_pointer) : "I"(-kTlsTcbOffset));
  descr_addr = thread_pointer - TlsPreTcbSize();
# else
#  error "unsupported CPU arch"
# endif
  return descr_addr;
}
#endif  // (x86_64 || i386 || mips || aarch64 || powerpc64 || s390 || arm) &&
        // SANITIZER_LINUX && !SANITIZER_ANDROID

#if SANITIZER_FREEBSD
static void **ThreadSelfSegbase() {
  void **segbase = 0;
# if defined(__i386__)
  // sysarch(I386_GET_GSBASE, segbase);
  __asm __volatile("mov %%gs:0, %0" : "=r" (segbase));
# elif defined(__x86_64__)
  // sysarch(AMD64_GET_FSBASE, segbase);
  __asm __volatile("movq %%fs:0, %0" : "=r" (segbase));
# else
#  error "unsupported CPU arch"
# endif
  return segbase;
}

uptr ThreadSelf() {
  return (uptr)ThreadSelfSegbase()[2];
}
#endif  // SANITIZER_FREEBSD

#if SANITIZER_NETBSD
static struct tls_tcb * ThreadSelfTlsTcb() {
  struct tls_tcb * tcb;
# ifdef __HAVE___LWP_GETTCB_FAST
  tcb = (struct tls_tcb *)__lwp_gettcb_fast();
# elif defined(__HAVE___LWP_GETPRIVATE_FAST)
  tcb = (struct tls_tcb *)__lwp_getprivate_fast();
# endif
  return tcb;
}

uptr ThreadSelf() {
  return (uptr)ThreadSelfTlsTcb()->tcb_pthread;
}

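// dl_iterate_phdr() callback: record the size (p_memsz) of the PT_TLS segment
// of the main executable, which ld.elf_so always registers as module id 1.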
int GetSizeFromHdr(struct dl_phdr_info *info, size_t size, void *data) {
  const Elf_Phdr *hdr = info->dlpi_phdr;
  const Elf_Phdr *last_hdr = hdr + info->dlpi_phnum;

  for (; hdr != last_hdr; ++hdr) {
    if (hdr->p_type == PT_TLS && info->dlpi_tls_modid == 1) {
      *(uptr*)data = hdr->p_memsz;
      break;
    }
  }
  return 0;
}
#endif  // SANITIZER_NETBSD

#if !SANITIZER_GO
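// Report the address and size of the calling thread's static TLS block,
// using the OS-specific thread descriptor / TCB layout.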
static void GetTls(uptr *addr, uptr *size) {
#if SANITIZER_LINUX && !SANITIZER_ANDROID
# if defined(__x86_64__) || defined(__i386__) || defined(__s390__)
  *addr = ThreadSelf();
  *size = GetTlsSize();
  *addr -= *size;
  *addr += ThreadDescriptorSize();
# elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) \
    || defined(__arm__)
  *addr = ThreadSelf();
  *size = GetTlsSize();
# else
  *addr = 0;
  *size = 0;
# endif
#elif SANITIZER_FREEBSD
  void** segbase = ThreadSelfSegbase();
  *addr = 0;
  *size = 0;
  if (segbase != 0) {
    // tcbalign = 16
    // tls_size = round(tls_static_space, tcbalign);
    // dtv = segbase[1];
    // dtv[2] = segbase - tls_static_space;
    void **dtv = (void**) segbase[1];
    *addr = (uptr) dtv[2];
    *size = (*addr == 0) ? 0 : ((uptr) segbase[0] - (uptr) dtv[2]);
  }
#elif SANITIZER_NETBSD
  struct tls_tcb * const tcb = ThreadSelfTlsTcb();
  *addr = 0;
  *size = 0;
  if (tcb != 0) {
    // Find size (p_memsz) of dlpi_tls_modid 1 (TLS block of the main program).
    // ld.elf_so hardcodes the index 1.
    dl_iterate_phdr(GetSizeFromHdr, size);

    if (*size != 0) {
      // The block has been found and tcb_dtv[1] contains the base address
      *addr = (uptr)tcb->tcb_dtv[1];
    }
  }
#elif SANITIZER_OPENBSD
  *addr = 0;
  *size = 0;
#elif SANITIZER_ANDROID
  *addr = 0;
  *size = 0;
#elif SANITIZER_SOLARIS
  // FIXME
  *addr = 0;
  *size = 0;
#else
# error "Unknown OS"
#endif
}
#endif

#if !SANITIZER_GO
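// Size of the static TLS block: computed per call on the BSDs, Android and
// Solaris, otherwise taken from the value cached by InitTlsSize().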
uptr GetTlsSize() {
#if SANITIZER_FREEBSD || SANITIZER_ANDROID || SANITIZER_NETBSD ||              \
    SANITIZER_OPENBSD || SANITIZER_SOLARIS
  uptr addr, size;
  GetTls(&addr, &size);
  return size;
#elif defined(__mips__) || defined(__powerpc64__)
  return RoundUpTo(g_tls_size + TlsPreTcbSize(), 16);
#else
  return g_tls_size;
#endif
}
#endif

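// Report both the stack and the TLS ranges of the calling thread; for
// non-main threads the stack is trimmed so the two ranges do not overlap.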
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size) {
#if SANITIZER_GO
  // Stub implementation for Go.
  *stk_addr = *stk_size = *tls_addr = *tls_size = 0;
#else
  GetTls(tls_addr, tls_size);

  uptr stack_top, stack_bottom;
  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
  *stk_addr = stack_bottom;
  *stk_size = stack_top - stack_bottom;

  if (!main) {
    // If the stack and TLS ranges intersect, make them non-intersecting.
    if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) {
      CHECK_GT(*tls_addr + *tls_size, *stk_addr);
      CHECK_LE(*tls_addr + *tls_size, *stk_addr + *stk_size);
      *stk_size -= *tls_size;
      *tls_addr = *stk_addr + *stk_size;
    }
  }
#endif
}

#if !SANITIZER_FREEBSD && !SANITIZER_OPENBSD
typedef ElfW(Phdr) Elf_Phdr;
#elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001 // v9.2
#define Elf_Phdr XElf32_Phdr
#define dl_phdr_info xdl_phdr_info
#define dl_iterate_phdr(c, b) xdl_iterate_phdr((c), (b))
#endif // !SANITIZER_FREEBSD && !SANITIZER_OPENBSD

struct DlIteratePhdrData {
  InternalMmapVectorNoCtor<LoadedModule> *modules;
  bool first;
};

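// dl_iterate_phdr() callback: append one LoadedModule (name, base address and
// PT_LOAD ranges) per visited module; the first callback describes the main
// binary itself.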
static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
  DlIteratePhdrData *data = (DlIteratePhdrData*)arg;
  InternalScopedString module_name(kMaxPathLength);
  if (data->first) {
    data->first = false;
    // First module is the binary itself.
    ReadBinaryNameCached(module_name.data(), module_name.size());
  } else if (info->dlpi_name) {
    module_name.append("%s", info->dlpi_name);
  }
  if (module_name[0] == '\0')
    return 0;
  LoadedModule cur_module;
  cur_module.set(module_name.data(), info->dlpi_addr);
  for (int i = 0; i < (int)info->dlpi_phnum; i++) {
    const Elf_Phdr *phdr = &info->dlpi_phdr[i];
    if (phdr->p_type == PT_LOAD) {
      uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
      uptr cur_end = cur_beg + phdr->p_memsz;
      bool executable = phdr->p_flags & PF_X;
      bool writable = phdr->p_flags & PF_W;
      cur_module.addAddressRange(cur_beg, cur_end, executable,
                                 writable);
    }
  }
  data->modules->push_back(cur_module);
  return 0;
}

#if SANITIZER_ANDROID && __ANDROID_API__ < 21
extern "C" __attribute__((weak)) int dl_iterate_phdr(
    int (*)(struct dl_phdr_info *, size_t, void *), void *);
#endif

static bool requiresProcmaps() {
#if SANITIZER_ANDROID && __ANDROID_API__ <= 22
  // Fall back to /proc/maps if dl_iterate_phdr is unavailable or broken.
  // The runtime check allows the same library to work with
  // both K and L (and future) Android releases.
  return AndroidGetApiLevel() <= ANDROID_LOLLIPOP_MR1;
#else
  return false;
#endif
}

static void procmapsInit(InternalMmapVectorNoCtor<LoadedModule> *modules) {
  MemoryMappingLayout memory_mapping(/*cache_enabled*/true);
  memory_mapping.DumpListOfModules(modules);
}

void ListOfModules::init() {
  clearOrInit();
  if (requiresProcmaps()) {
    procmapsInit(&modules_);
  } else {
    DlIteratePhdrData data = {&modules_, true};
    dl_iterate_phdr(dl_iterate_phdr_cb, &data);
  }
}

// When a custom loader is used, dl_iterate_phdr may not contain the full
// list of modules. Allow callers to fall back to using procmaps.
void ListOfModules::fallbackInit() {
  if (!requiresProcmaps()) {
    clearOrInit();
    procmapsInit(&modules_);
  } else {
    clear();
  }
}

// getrusage does not give us the current RSS, only the max RSS.
// Still, this is better than nothing if /proc/self/statm is not available
// for some reason, e.g. due to a sandbox.
static uptr GetRSSFromGetrusage() {
  struct rusage usage;
  if (getrusage(RUSAGE_SELF, &usage))  // Failed, probably due to a sandbox.
    return 0;
  return usage.ru_maxrss << 10;  // ru_maxrss is in KiB.
}

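// Current resident set size of the process: parsed from /proc/self/statm
// (second field, in pages) when permitted, otherwise approximated via
// getrusage().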
uptr GetRSS() {
  if (!common_flags()->can_use_proc_maps_statm)
    return GetRSSFromGetrusage();
  fd_t fd = OpenFile("/proc/self/statm", RdOnly);
  if (fd == kInvalidFd)
    return GetRSSFromGetrusage();
  char buf[64];
  uptr len = internal_read(fd, buf, sizeof(buf) - 1);
  internal_close(fd);
  if ((sptr)len <= 0)
    return 0;
  buf[len] = 0;
  // The format of the file is:
  // 1084 89 69 11 0 79 0
  // We need the second number which is RSS in pages.
  char *pos = buf;
  // Skip the first number.
  while (*pos >= '0' && *pos <= '9')
    pos++;
  // Skip whitespace.
  while (!(*pos >= '0' && *pos <= '9') && *pos != 0)
    pos++;
  // Read the number.
  uptr rss = 0;
  while (*pos >= '0' && *pos <= '9')
    rss = rss * 10 + *pos++ - '0';
  return rss * GetPageSizeCached();
}

// sysconf(_SC_NPROCESSORS_{CONF,ONLN}) cannot be used on most platforms as
// they allocate memory.
u32 GetNumberOfCPUs() {
#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD
  u32 ncpu;
  int req[2];
  uptr len = sizeof(ncpu);
  req[0] = CTL_HW;
  req[1] = HW_NCPU;
  CHECK_EQ(internal_sysctl(req, 2, &ncpu, &len, NULL, 0), 0);
  return ncpu;
#elif SANITIZER_ANDROID && !defined(CPU_COUNT) && !defined(__aarch64__)
  // Fall back to /sys/devices/system/cpu on Android when cpu_set_t doesn't
  // exist in sched.h. That is the case for toolchains generated with older
  // NDKs.
  // This code doesn't work on AArch64 because internal_getdents makes use of
  // the 64-bit getdents syscall, but cpu_set_t seems to always exist on AArch64.
  uptr fd = internal_open("/sys/devices/system/cpu", O_RDONLY | O_DIRECTORY);
  if (internal_iserror(fd))
    return 0;
  InternalMmapVector<u8> buffer(4096);
  uptr bytes_read = buffer.size();
  uptr n_cpus = 0;
  u8 *d_type;
  struct linux_dirent *entry = (struct linux_dirent *)&buffer[bytes_read];
  while (true) {
    if ((u8 *)entry >= &buffer[bytes_read]) {
      bytes_read = internal_getdents(fd, (struct linux_dirent *)buffer.data(),
                                     buffer.size());
      if (internal_iserror(bytes_read) || !bytes_read)
        break;
      entry = (struct linux_dirent *)buffer.data();
    }
    d_type = (u8 *)entry + entry->d_reclen - 1;
    if (d_type >= &buffer[bytes_read] ||
        (u8 *)&entry->d_name[3] >= &buffer[bytes_read])
      break;
    if (entry->d_ino != 0 && *d_type == DT_DIR) {
      if (entry->d_name[0] == 'c' && entry->d_name[1] == 'p' &&
          entry->d_name[2] == 'u' &&
          entry->d_name[3] >= '0' && entry->d_name[3] <= '9')
        n_cpus++;
    }
    entry = (struct linux_dirent *)(((u8 *)entry) + entry->d_reclen);
  }
  internal_close(fd);
  return n_cpus;
#elif SANITIZER_SOLARIS
  return sysconf(_SC_NPROCESSORS_ONLN);
#else
  cpu_set_t CPUs;
  CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
  return CPU_COUNT(&CPUs);
#endif
}

#if SANITIZER_LINUX

# if SANITIZER_ANDROID
static atomic_uint8_t android_log_initialized;

void AndroidLogInit() {
  openlog(GetProcessName(), 0, LOG_USER);
  atomic_store(&android_log_initialized, 1, memory_order_release);
}

static bool ShouldLogAfterPrintf() {
  return atomic_load(&android_log_initialized, memory_order_acquire);
}

extern "C" SANITIZER_WEAK_ATTRIBUTE
int async_safe_write_log(int pri, const char* tag, const char* msg);
extern "C" SANITIZER_WEAK_ATTRIBUTE
int __android_log_write(int prio, const char* tag, const char* msg);

// ANDROID_LOG_INFO is 4, but can't be resolved at runtime.
#define SANITIZER_ANDROID_LOG_INFO 4

// async_safe_write_log is a new public version of __libc_write_log that is
// used behind syslog. It is preferable to syslog as it will not do any dynamic
// memory allocation or formatting.
// If the function is not available, syslog is preferred for L+ (it was broken
// pre-L) as __android_log_write triggers racy behavior with the strncpy
// interceptor. Fall back to __android_log_write pre-L.
void WriteOneLineToSyslog(const char *s) {
  if (&async_safe_write_log) {
    async_safe_write_log(SANITIZER_ANDROID_LOG_INFO, GetProcessName(), s);
  } else if (AndroidGetApiLevel() > ANDROID_KITKAT) {
    syslog(LOG_INFO, "%s", s);
  } else {
    CHECK(&__android_log_write);
    __android_log_write(SANITIZER_ANDROID_LOG_INFO, nullptr, s);
  }
}

extern "C" SANITIZER_WEAK_ATTRIBUTE
void android_set_abort_message(const char *);

void SetAbortMessage(const char *str) {
  if (&android_set_abort_message)
    android_set_abort_message(str);
}
# else
void AndroidLogInit() {}

static bool ShouldLogAfterPrintf() { return true; }

void WriteOneLineToSyslog(const char *s) { syslog(LOG_INFO, "%s", s); }

void SetAbortMessage(const char *str) {}
# endif  // SANITIZER_ANDROID

void LogMessageOnPrintf(const char *str) {
  if (common_flags()->log_to_syslog && ShouldLogAfterPrintf())
    WriteToSyslog(str);
}

#endif  // SANITIZER_LINUX

#if SANITIZER_LINUX && !SANITIZER_GO
// glibc crashes when using clock_gettime from a preinit_array function as the
// vDSO function pointers haven't been initialized yet. __progname is
// initialized after the vDSO function pointers, so if it exists, is not null,
// and is not empty, we can use clock_gettime.
extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname;
INLINE bool CanUseVDSO() {
  // Bionic is safe, it checks for the vDSO function pointers to be initialized.
  if (SANITIZER_ANDROID)
    return true;
  if (&__progname && __progname && *__progname)
    return true;
  return false;
}

// MonotonicNanoTime is a timing function that can leverage the vDSO by calling
// clock_gettime. real_clock_gettime only exists if clock_gettime is
// intercepted, so define it weakly and use it if available.
extern "C" SANITIZER_WEAK_ATTRIBUTE
int real_clock_gettime(u32 clk_id, void *tp);
u64 MonotonicNanoTime() {
  timespec ts;
  if (CanUseVDSO()) {
    if (&real_clock_gettime)
      real_clock_gettime(CLOCK_MONOTONIC, &ts);
    else
      clock_gettime(CLOCK_MONOTONIC, &ts);
  } else {
    internal_clock_gettime(CLOCK_MONOTONIC, &ts);
  }
  return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
}
#else
// Non-Linux & Go always use the syscall.
u64 MonotonicNanoTime() {
  timespec ts;
  internal_clock_gettime(CLOCK_MONOTONIC, &ts);
  return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
}
#endif  // SANITIZER_LINUX && !SANITIZER_GO

#if !SANITIZER_OPENBSD
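// Re-execute the current binary with its original arguments and environment,
// resolving the executable path in an OS-specific way (and avoiding
// /proc/self/exe where that would change $EXEC_ORIGIN).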
void ReExec() {
  const char *pathname = "/proc/self/exe";

#if SANITIZER_NETBSD
  static const int name[] = {
      CTL_KERN,
      KERN_PROC_ARGS,
      -1,
      KERN_PROC_PATHNAME,
  };
  char path[400];
  uptr len;

  len = sizeof(path);
  if (internal_sysctl(name, ARRAY_SIZE(name), path, &len, NULL, 0) != -1)
    pathname = path;
#elif SANITIZER_SOLARIS
  pathname = getexecname();
  CHECK_NE(pathname, NULL);
#elif SANITIZER_USE_GETAUXVAL
  // Calling execve with /proc/self/exe sets that as $EXEC_ORIGIN. Binaries that
  // rely on that will fail to load shared libraries. Query AT_EXECFN instead.
  pathname = reinterpret_cast<const char *>(getauxval(AT_EXECFN));
#endif

  uptr rv = internal_execve(pathname, GetArgv(), GetEnviron());
  int rverrno;
  CHECK_EQ(internal_iserror(rv, &rverrno), true);
  Printf("execve failed, errno %d\n", rverrno);
  Die();
}
#endif  // !SANITIZER_OPENBSD

} // namespace __sanitizer

#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD ||
        // SANITIZER_OPENBSD || SANITIZER_SOLARIS