xref: /freebsd-src/contrib/llvm-project/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp (revision 68d75eff68281c1b445e3010bb975eae07aac225)
1*68d75effSDimitry Andric //===-- sanitizer_fuchsia.cpp ---------------------------------------------===//
2*68d75effSDimitry Andric //
3*68d75effSDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4*68d75effSDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
5*68d75effSDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6*68d75effSDimitry Andric //
7*68d75effSDimitry Andric //===----------------------------------------------------------------------===//
8*68d75effSDimitry Andric //
9*68d75effSDimitry Andric // This file is shared between AddressSanitizer and other sanitizer
10*68d75effSDimitry Andric // run-time libraries and implements Fuchsia-specific functions from
11*68d75effSDimitry Andric // sanitizer_common.h.
12*68d75effSDimitry Andric //===----------------------------------------------------------------------===//
13*68d75effSDimitry Andric 
14*68d75effSDimitry Andric #include "sanitizer_fuchsia.h"
15*68d75effSDimitry Andric #if SANITIZER_FUCHSIA
16*68d75effSDimitry Andric 
17*68d75effSDimitry Andric #include "sanitizer_common.h"
18*68d75effSDimitry Andric #include "sanitizer_libc.h"
19*68d75effSDimitry Andric #include "sanitizer_mutex.h"
20*68d75effSDimitry Andric 
21*68d75effSDimitry Andric #include <limits.h>
22*68d75effSDimitry Andric #include <pthread.h>
23*68d75effSDimitry Andric #include <stdlib.h>
24*68d75effSDimitry Andric #include <unistd.h>
25*68d75effSDimitry Andric #include <zircon/errors.h>
26*68d75effSDimitry Andric #include <zircon/process.h>
27*68d75effSDimitry Andric #include <zircon/syscalls.h>
28*68d75effSDimitry Andric 
29*68d75effSDimitry Andric namespace __sanitizer {
30*68d75effSDimitry Andric 
// Terminate the current process with the given exit code; maps directly
// onto the zx_process_exit syscall and does not return.
void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }

// Yield the CPU by sleeping until deadline 0 (i.e. a time already in the
// past), which is the Zircon idiom for sched_yield.
uptr internal_sched_yield() {
  zx_status_t status = _zx_nanosleep(0);
  CHECK_EQ(status, ZX_OK);
  return 0;  // Why doesn't this return void?
}

// Sleep for |ns| nanoseconds relative to now.
static void internal_nanosleep(zx_time_t ns) {
  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ns));
  CHECK_EQ(status, ZX_OK);
}

// Sleep for |seconds| seconds.  Always returns 0 ("no time remaining"):
// the underlying nanosleep is not interruptible the way POSIX sleep() is.
unsigned int internal_sleep(unsigned int seconds) {
  internal_nanosleep(ZX_SEC(seconds));
  return 0;
}
48*68d75effSDimitry Andric 
// Wall-clock (UTC) time in nanoseconds.
u64 NanoTime() {
  zx_time_t time;
  zx_status_t status = _zx_clock_get(ZX_CLOCK_UTC, &time);
  CHECK_EQ(status, ZX_OK);
  return time;
}

// Monotonic time in nanoseconds; this syscall cannot fail.
u64 MonotonicNanoTime() { return _zx_clock_get_monotonic(); }
57*68d75effSDimitry Andric 
58*68d75effSDimitry Andric uptr internal_getpid() {
59*68d75effSDimitry Andric   zx_info_handle_basic_t info;
60*68d75effSDimitry Andric   zx_status_t status =
61*68d75effSDimitry Andric       _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,
62*68d75effSDimitry Andric                           sizeof(info), NULL, NULL);
63*68d75effSDimitry Andric   CHECK_EQ(status, ZX_OK);
64*68d75effSDimitry Andric   uptr pid = static_cast<uptr>(info.koid);
65*68d75effSDimitry Andric   CHECK_EQ(pid, info.koid);
66*68d75effSDimitry Andric   return pid;
67*68d75effSDimitry Andric }
68*68d75effSDimitry Andric 
// A thread is identified by its C11 thrd_t, reused both as the "self"
// token and as the tid.
uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }

tid_t GetTid() { return GetThreadSelf(); }

void Abort() { abort(); }

int Atexit(void (*function)(void)) { return atexit(function); }

void SleepForSeconds(int seconds) { internal_sleep(seconds); }

void SleepForMillis(int millis) { internal_nanosleep(ZX_MSEC(millis)); }
80*68d75effSDimitry Andric 
// Report the current thread's stack range via pthread attributes.  The
// unnamed bool parameter (used on other platforms to distinguish the main
// thread at initialization) is ignored here.
void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  void *base;
  size_t size;
  CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
  CHECK_EQ(pthread_attr_destroy(&attr), 0);

  // pthread_attr_getstack reports the lowest address; the top is base+size.
  *stack_bottom = reinterpret_cast<uptr>(base);
  *stack_top = *stack_bottom + size;
}
92*68d75effSDimitry Andric 
// The hooks below are required by sanitizer_common but are no-ops on this
// platform (no re-exec, no ASLR/mprotect checks, no POSIX signal handlers
// or core-dump settings to manage from here).
void InitializePlatformEarly() {}
void MaybeReexec() {}
void CheckASLR() {}
void CheckMPROTECT() {}
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void SetAlternateSignalStack() {}
void UnsetAlternateSignalStack() {}
void InitTlsSize() {}

void PrintModuleMap() {}
105*68d75effSDimitry Andric 
// POSIX-style signal contexts are never produced on this platform, so
// these members are stubbed or unimplemented.
bool SignalContext::IsStackOverflow() const { return false; }
void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *SignalContext::Describe() const { UNIMPLEMENTED(); }

// States of the futex word used by BlockingMutex below.  MtxSleeping means
// the lock is held AND there may be waiters blocked in zx_futex_wait.
enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
111*68d75effSDimitry Andric 
// Zero-initialize the mutex (futex word MtxUnlocked, owner_ 0) without
// letting the compiler emit an interceptable memset call.
BlockingMutex::BlockingMutex() {
  // NOTE!  It's important that this use internal_memset, because plain
  // memset might be intercepted (e.g., actually be __asan_memset).
  // Defining this so the compiler initializes each field, e.g.:
  //   BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
  // might result in the compiler generating a call to memset, which would
  // have the same problem.
  internal_memset(this, 0, sizeof(*this));
}
121*68d75effSDimitry Andric 
// Acquire the mutex.  Fast path: one atomic exchange takes the lock if it
// was free.  Slow path: mark the word MtxSleeping (lock held, waiter
// present) and block on the futex until the word is observed unlocked.
void BlockingMutex::Lock() {
  CHECK_EQ(owner_, 0);
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
    return;
  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
    zx_status_t status =
        _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m), MtxSleeping,
                       ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
    // ZX_ERR_BAD_STATE means the futex word no longer equals MtxSleeping,
    // i.e. we raced with an unlock; just retry the exchange above.
    if (status != ZX_ERR_BAD_STATE)  // Normal race.
      CHECK_EQ(status, ZX_OK);
  }
}
135*68d75effSDimitry Andric 
// Release the mutex.  If the previous state was MtxSleeping, there may be a
// blocked waiter, so wake exactly one.  Unlocking an unlocked mutex is a
// hard error.
void BlockingMutex::Unlock() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
  CHECK_NE(v, MtxUnlocked);
  if (v == MtxSleeping) {
    zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
    CHECK_EQ(status, ZX_OK);
  }
}

// Debug check that the mutex is currently held (by someone).
void BlockingMutex::CheckLocked() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}
150*68d75effSDimitry Andric 
uptr GetPageSize() { return PAGE_SIZE; }

// Mappings are page-granular on this platform.
uptr GetMmapGranularity() { return PAGE_SIZE; }

// Shadow-memory layout provided by the Fuchsia runtime; cached here on the
// first call to GetMaxUserVirtualAddress.
sanitizer_shadow_bounds_t ShadowBounds;

uptr GetMaxUserVirtualAddress() {
  ShadowBounds = __sanitizer_shadow_bounds();
  // memory_limit is one past the last usable address.
  return ShadowBounds.memory_limit - 1;
}

// User and kernel address spaces are fully separate, so these coincide.
uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }
163*68d75effSDimitry Andric 
// Allocate |size| bytes (rounded up to whole pages) of zero-filled,
// read-write memory backed by a fresh VMO named |mem_type|.  On failure:
// dies via ReportMmapFailureAndDie, except that out-of-memory returns
// nullptr when |die_for_nomem| is false.
static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                  bool raw_report, bool die_for_nomem) {
  size = RoundUpTo(size, PAGE_SIZE);

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
                              raw_report);
    return nullptr;
  }
  // Label the VMO so the allocation is identifiable in diagnostics.
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
  uintptr_t addr;
  status =
      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                   vmo, 0, size, &addr);
  // The mapping keeps the VMO alive; the handle itself is no longer needed.
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
                              raw_report);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}
197*68d75effSDimitry Andric 
// Allocate or die, including on out-of-memory.
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
}

// There is no overcommit/reserve distinction here; same as MmapOrDie.
void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type);
}

// Like MmapOrDie but returns nullptr on out-of-memory instead of dying.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  return DoAnonymousMmapOrDie(size, mem_type, false, false);
}
209*68d75effSDimitry Andric 
// Reserve |init_size| bytes of address space by allocating a child VMAR;
// later Map/MapOrDie calls place pages inside it.  Dies on failure.
// NOTE(review): |fixed_addr| is accepted but never used — the kernel picks
// the base address.  Confirm callers on this platform never pass a
// meaningful fixed address.
uptr ReservedAddressRange::Init(uptr init_size, const char *name,
                                uptr fixed_addr) {
  init_size = RoundUpTo(init_size, PAGE_SIZE);
  DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
  uintptr_t base;
  zx_handle_t vmar;
  zx_status_t status =
      _zx_vmar_allocate(
          _zx_vmar_root_self(),
          ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC,
          0, init_size, &vmar, &base);
  if (status != ZX_OK)
    ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
  base_ = reinterpret_cast<void *>(base);
  size_ = init_size;
  name_ = name;
  os_handle_ = vmar;

  return reinterpret_cast<uptr>(base_);
}
230*68d75effSDimitry Andric 
// Map |map_size| bytes of fresh zeroed memory at exactly |fixed_addr|
// inside |vmar| (whose base address is |base|).  Returns the mapped
// address, or 0 on out-of-memory when |die_for_nomem| is false; dies on
// any other failure.
static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
                             void *base, const char *name, bool die_for_nomem) {
  // ZX_VM_SPECIFIC takes an offset relative to the VMAR base, not an
  // absolute address.
  uptr offset = fixed_addr - reinterpret_cast<uptr>(base);
  map_size = RoundUpTo(map_size, PAGE_SIZE);
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(map_size, name, "zx_vmo_create", status);
    return 0;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, name, internal_strlen(name));
  // NOTE(review): `size_` is a ReservedAddressRange member and is not in
  // scope in this static function; this only compiles because DCHECK_GE
  // expands to nothing in non-debug builds.  Presumably the intent was to
  // check against the reserved range's size — confirm and fix for
  // SANITIZER_DEBUG builds.
  DCHECK_GE(base + size_, map_size + offset);
  uintptr_t addr;

  status =
      _zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
                   offset, vmo, 0, map_size, &addr);
  // The mapping keeps the VMO alive; drop the now-unneeded handle.
  _zx_handle_close(vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem) {
      ReportMmapFailureAndDie(map_size, name, "zx_vmar_map", status);
    }
    return 0;
  }
  IncreaseTotalMmap(map_size);
  return addr;
}
259*68d75effSDimitry Andric 
// Map pages at |fixed_addr| within the reserved range; returns 0 on
// out-of-memory.  The |name| parameter is ignored in favor of the name
// given at Init time.
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,
                               const char *name) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
                          name_, false);
}

// Same as Map, but dies on out-of-memory as well.
uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size,
                                    const char *name) {
  return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
                          name_, true);
}
271*68d75effSDimitry Andric 
// Unmap [addr, addr+size) from |target_vmar|, dying with a report if the
// kernel refuses.  No-op for a null address or zero size.
void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
  if (!addr || !size) return;
  size = RoundUpTo(size, PAGE_SIZE);

  zx_status_t status =
      _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
  if (status != ZX_OK) {
    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, size, size, addr);
    CHECK("unable to unmap" && 0);
  }

  DecreaseTotalMmap(size);
}
286*68d75effSDimitry Andric 
// Unmap part or all of the reserved range.  Only three shapes are legal:
// the whole range (destroys the VMAR), a prefix starting at base_, or a
// suffix ending at base_ + size_.
void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  CHECK_LE(size, size_);
  const zx_handle_t vmar = static_cast<zx_handle_t>(os_handle_);
  if (addr == reinterpret_cast<uptr>(base_)) {
    if (size == size_) {
      // Destroying the vmar effectively unmaps the whole mapping.
      _zx_vmar_destroy(vmar);
      _zx_handle_close(vmar);
      os_handle_ = static_cast<uptr>(ZX_HANDLE_INVALID);
      DecreaseTotalMmap(size);
      return;
    }
  } else {
    // Not a prefix: must be exactly the tail of the range.
    CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
  }
  // Partial unmapping does not affect the fact that the initial range is still
  // reserved, and the resulting unmapped memory can't be reused.
  UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, vmar);
}
306*68d75effSDimitry Andric 
// This should never be called.
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  UNIMPLEMENTED();
}
311*68d75effSDimitry Andric 
// Allocate |size| bytes aligned to |alignment| (both powers of two, size at
// least one page).  Returns nullptr on out-of-memory; dies on any other
// failure.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK_GE(size, PAGE_SIZE);
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?

  // Map a larger size to get a chunk of address space big enough that
  // it surely contains an aligned region of the requested size.  Then
  // overwrite the aligned middle portion with a mapping from the
  // beginning of the VMO, and unmap the excess before and after.
  size_t map_size = size + alignment;
  uintptr_t addr;
  status =
      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
                   vmo, 0, map_size, &addr);
  if (status == ZX_OK) {
    uintptr_t map_addr = addr;
    uintptr_t map_end = map_addr + map_size;
    // Aligned sub-range [addr, end) within the over-sized mapping.
    addr = RoundUpTo(map_addr, alignment);
    uintptr_t end = addr + size;
    if (addr != map_addr) {
      // Need the VMAR's base address to compute the ZX_VM_SPECIFIC offset.
      zx_info_vmar_t info;
      status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
                                   sizeof(info), NULL, NULL);
      if (status == ZX_OK) {
        uintptr_t new_addr;
        // Remap VMO offset 0 at the aligned address, replacing whatever
        // part of the original mapping was there.
        status = _zx_vmar_map(
            _zx_vmar_root_self(),
            ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
            addr - info.base, vmo, 0, size, &new_addr);
        if (status == ZX_OK) CHECK_EQ(new_addr, addr);
      }
    }
    // Trim the unused slack before and after the aligned region.
    if (status == ZX_OK && addr != map_addr)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);
    if (status == ZX_OK && end != map_end)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);
  }
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}
374*68d75effSDimitry Andric 
// Unmap from the root VMAR (the counterpart of MmapOrDie).
void UnmapOrDie(void *addr, uptr size) {
  UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
}

// This is used on the shadow mapping, which cannot be changed.
// Zircon doesn't have anything like MADV_DONTNEED.
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}
382*68d75effSDimitry Andric 
void DumpProcessMap() {
  // TODO(mcgrathr): write it
  return;
}

// Probe whether [beg, beg+size) is readable by writing it into a scratch
// VMO; the kernel validates the source range for us.
bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  // TODO(mcgrathr): Figure out a better way.
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status == ZX_OK) {
    status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size);
    _zx_handle_close(vmo);
  }
  return status == ZX_OK;
}

// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {}
401*68d75effSDimitry Andric 
// "Read" a configuration file by asking the Fuchsia runtime for a VMO named
// |file_name| and mapping it read-only.  On success *buff points at the
// mapping, *buff_size is the (page-rounded) mapped size, and *read_len is
// min(file size, max_len).  On failure the zx_status_t is stored through
// |errno_p| (if non-null) and false is returned.
// NOTE(review): the returned buffer is a VMAR mapping, not heap memory —
// presumably callers release it via UnmapOrDie rather than free; confirm.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len, error_t *errno_p) {
  zx_handle_t vmo;
  zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);
  if (status == ZX_OK) {
    uint64_t vmo_size;
    status = _zx_vmo_get_size(vmo, &vmo_size);
    if (status == ZX_OK) {
      if (vmo_size < max_len) max_len = vmo_size;
      size_t map_size = RoundUpTo(max_len, PAGE_SIZE);
      uintptr_t addr;
      status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0,
                            map_size, &addr);
      if (status == ZX_OK) {
        *buff = reinterpret_cast<char *>(addr);
        *buff_size = map_size;
        *read_len = max_len;
      }
    }
    // The mapping (if any) keeps the VMO alive without the handle.
    _zx_handle_close(vmo);
  }
  if (status != ZX_OK && errno_p) *errno_p = status;
  return status == ZX_OK;
}
426*68d75effSDimitry Andric 
// Emit |buffer| via the Fuchsia debug log, which is line-oriented.  A
// per-thread buffer accumulates text so that each __sanitizer_log_write
// call covers whole lines where possible; a partial line is kept for the
// next call.  Lines longer than the 128-byte buffer are flushed in chunks.
void RawWrite(const char *buffer) {
  constexpr size_t size = 128;
  static _Thread_local char line[size];
  // Offset one past the last '\n' currently buffered (0 = none).
  static _Thread_local size_t lastLineEnd = 0;
  // Number of bytes currently buffered.
  static _Thread_local size_t cur = 0;

  while (*buffer) {
    if (cur >= size) {
      // Buffer full: flush up to the last complete line (or the whole
      // buffer if it holds no newline) and shift the remainder down.
      if (lastLineEnd == 0)
        lastLineEnd = size;
      __sanitizer_log_write(line, lastLineEnd);
      internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
      cur = cur - lastLineEnd;
      lastLineEnd = 0;
    }
    if (*buffer == '\n')
      lastLineEnd = cur + 1;
    line[cur++] = *buffer++;
  }
  // Flush all complete lines before returning.
  if (lastLineEnd != 0) {
    __sanitizer_log_write(line, lastLineEnd);
    internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
    cur = cur - lastLineEnd;
    lastLineEnd = 0;
  }
}
454*68d75effSDimitry Andric 
// Last-gasp output path: write straight to the debug log, bypassing the
// line buffering in RawWrite.
void CatastrophicErrorWrite(const char *buffer, uptr length) {
  __sanitizer_log_write(buffer, length);
}

// argv/envp captured by __sanitizer_startup_hook below.
char **StoredArgv;
char **StoredEnviron;

char **GetArgv() { return StoredArgv; }
char **GetEnviron() { return StoredEnviron; }
464*68d75effSDimitry Andric 
465*68d75effSDimitry Andric const char *GetEnv(const char *name) {
466*68d75effSDimitry Andric   if (StoredEnviron) {
467*68d75effSDimitry Andric     uptr NameLen = internal_strlen(name);
468*68d75effSDimitry Andric     for (char **Env = StoredEnviron; *Env != 0; Env++) {
469*68d75effSDimitry Andric       if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
470*68d75effSDimitry Andric         return (*Env) + NameLen + 1;
471*68d75effSDimitry Andric     }
472*68d75effSDimitry Andric   }
473*68d75effSDimitry Andric   return nullptr;
474*68d75effSDimitry Andric }
475*68d75effSDimitry Andric 
476*68d75effSDimitry Andric uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
477*68d75effSDimitry Andric   const char *argv0 = "<UNKNOWN>";
478*68d75effSDimitry Andric   if (StoredArgv && StoredArgv[0]) {
479*68d75effSDimitry Andric     argv0 = StoredArgv[0];
480*68d75effSDimitry Andric   }
481*68d75effSDimitry Andric   internal_strncpy(buf, argv0, buf_len);
482*68d75effSDimitry Andric   return internal_strlen(buf);
483*68d75effSDimitry Andric }
484*68d75effSDimitry Andric 
485*68d75effSDimitry Andric uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
486*68d75effSDimitry Andric   return ReadBinaryName(buf, buf_len);
487*68d75effSDimitry Andric }
488*68d75effSDimitry Andric 
// Main-thread stack bounds captured by __sanitizer_startup_hook below.
uptr MainThreadStackBase, MainThreadStackSize;

// Fill |buffer| with |length| bytes from the kernel CPRNG.  |blocking| is
// ignored: zx_cprng_draw never blocks and (since it cannot fail for sizes
// up to ZX_CPRNG_DRAW_MAX_LEN, which we CHECK) always succeeds.
bool GetRandom(void *buffer, uptr length, bool blocking) {
  CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);
  _zx_cprng_draw(buffer, length);
  return true;
}

u32 GetNumberOfCPUs() {
  return zx_system_get_num_cpus();
}

// Resident-set size is not reported on this platform.
uptr GetRSS() { UNIMPLEMENTED(); }
502*68d75effSDimitry Andric 
503*68d75effSDimitry Andric }  // namespace __sanitizer
504*68d75effSDimitry Andric 
505*68d75effSDimitry Andric using namespace __sanitizer;
506*68d75effSDimitry Andric 
507*68d75effSDimitry Andric extern "C" {
// Called by the Fuchsia runtime at process startup; records argv/envp and
// the main thread's stack for later use by the accessors above.
void __sanitizer_startup_hook(int argc, char **argv, char **envp,
                              void *stack_base, size_t stack_size) {
  __sanitizer::StoredArgv = argv;
  __sanitizer::StoredEnviron = envp;
  __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
  __sanitizer::MainThreadStackSize = stack_size;
}
515*68d75effSDimitry Andric 
void __sanitizer_set_report_path(const char *path) {
  // Handle the initialization code in each sanitizer, but no other calls.
  // This setting is never consulted on Fuchsia.
  DCHECK_EQ(path, common_flags()->log_path);
}

// Report fds do not exist on Fuchsia; any call is a bug.
void __sanitizer_set_report_fd(void *fd) {
  UNREACHABLE("not available on Fuchsia");
}
525*68d75effSDimitry Andric }  // extern "C"
526*68d75effSDimitry Andric 
527*68d75effSDimitry Andric #endif  // SANITIZER_FUCHSIA
528