//===-- sanitizer_fuchsia.cc ---------------------------------------------===//
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===---------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and other sanitizer
// run-time libraries and implements Fuchsia-specific functions from
// sanitizer_common.h.
//===---------------------------------------------------------------------===//

#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA

#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_stacktrace.h"

#include <limits.h>
#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>
#include <unwind.h>
#include <zircon/errors.h>
#include <zircon/process.h>
#include <zircon/syscalls.h>

namespace __sanitizer {

void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }

uptr internal_sched_yield() {
  zx_status_t status = _zx_nanosleep(0);
  CHECK_EQ(status, ZX_OK);
  return 0;  // Why doesn't this return void?
}

static void internal_nanosleep(zx_time_t ns) {
  zx_status_t status = _zx_nanosleep(_zx_deadline_after(ns));
  CHECK_EQ(status, ZX_OK);
}

unsigned int internal_sleep(unsigned int seconds) {
  internal_nanosleep(ZX_SEC(seconds));
  return 0;
}

u64 NanoTime() { return _zx_time_get(ZX_CLOCK_UTC); }

uptr internal_getpid() {
  zx_info_handle_basic_t info;
  zx_status_t status =
      _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,
                          sizeof(info), NULL, NULL);
  CHECK_EQ(status, ZX_OK);
  uptr pid = static_cast<uptr>(info.koid);
  CHECK_EQ(pid, info.koid);
  return pid;
}

uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }

uptr GetTid() { return GetThreadSelf(); }

void Abort() { abort(); }

int Atexit(void (*function)(void)) { return atexit(function); }

void SleepForSeconds(int seconds) { internal_sleep(seconds); }

void SleepForMillis(int millis) { internal_nanosleep(ZX_MSEC(millis)); }

void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
  pthread_attr_t attr;
  CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
  void *base;
  size_t size;
  CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
  CHECK_EQ(pthread_attr_destroy(&attr), 0);

  *stack_bottom = reinterpret_cast<uptr>(base);
  *stack_top = *stack_bottom + size;
}

void MaybeReexec() {}
void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void StartReportDeadlySignal() {}
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context) {}
void SetAlternateSignalStack() {}
void UnsetAlternateSignalStack() {}
void InitTlsSize() {}

void PrintModuleMap() {}

bool SignalContext::IsStackOverflow() const { return false; }
void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
const char *SignalContext::Describe() const { UNIMPLEMENTED(); }

struct UnwindTraceArg {
  BufferedStackTrace *stack;
  u32 max_depth;
};
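
// Callback for _Unwind_Backtrace (see SlowUnwindStack below): records the PC
// of each frame into the BufferedStackTrace, treats PCs inside the first page
// as bogus and stops there, and asks the unwinder to stop early once
// max_depth frames have been collected.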
_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
  UnwindTraceArg *arg = static_cast<UnwindTraceArg *>(param);
  CHECK_LT(arg->stack->size, arg->max_depth);
  uptr pc = _Unwind_GetIP(ctx);
  if (pc < PAGE_SIZE) return _URC_NORMAL_STOP;
  arg->stack->trace_buffer[arg->stack->size++] = pc;
  return (arg->stack->size == arg->max_depth ? _URC_NORMAL_STOP
                                             : _URC_NO_REASON);
}

void BufferedStackTrace::SlowUnwindStack(uptr pc, u32 max_depth) {
  CHECK_GE(max_depth, 2);
  size = 0;
  UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
  _Unwind_Backtrace(Unwind_Trace, &arg);
  CHECK_GT(size, 0);
  // We need to pop a few frames so that pc is on top.
  uptr to_pop = LocatePcInTrace(pc);
  // trace_buffer[0] belongs to the current function so we always pop it,
  // unless there is only 1 frame in the stack trace (1 frame is always better
  // than 0!).
  PopStackFrames(Min(to_pop, static_cast<uptr>(1)));
  trace_buffer[0] = pc;
}

void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
                                                    u32 max_depth) {
  CHECK_NE(context, nullptr);
  UNREACHABLE("signal context doesn't exist");
}

enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };

BlockingMutex::BlockingMutex() {
  // NOTE! It's important that this use internal_memset, because plain
  // memset might be intercepted (e.g., actually be __asan_memset).
  // Defining this so the compiler initializes each field, e.g.:
  //   BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
  // might result in the compiler generating a call to memset, which would
  // have the same problem.
  internal_memset(this, 0, sizeof(*this));
}
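
// Locking protocol: the futex word cycles through the MutexState values
// above. Lock() moves it from MtxUnlocked to MtxLocked on the fast path;
// under contention it sets MtxSleeping and blocks in _zx_futex_wait until
// the word changes. Unlock() stores MtxUnlocked and wakes one waiter only
// when the previous value was MtxSleeping, so uncontended unlocks never
// enter the kernel.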
void BlockingMutex::Lock() {
  CHECK_EQ(owner_, 0);
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
    return;
  while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
    zx_status_t status = _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m),
                                        MtxSleeping, ZX_TIME_INFINITE);
    if (status != ZX_ERR_BAD_STATE)  // Normal race.
      CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::Unlock() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
  CHECK_NE(v, MtxUnlocked);
  if (v == MtxSleeping) {
    zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
    CHECK_EQ(status, ZX_OK);
  }
}

void BlockingMutex::CheckLocked() {
  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
  CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
}

uptr GetPageSize() { return PAGE_SIZE; }

uptr GetMmapGranularity() { return PAGE_SIZE; }

sanitizer_shadow_bounds_t ShadowBounds;

uptr GetMaxVirtualAddress() {
  ShadowBounds = __sanitizer_shadow_bounds();
  return ShadowBounds.memory_limit - 1;
}

static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
                                  bool raw_report, bool die_for_nomem) {
  size = RoundUpTo(size, PAGE_SIZE);

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
                              raw_report);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
  uintptr_t addr;
  status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, size,
                        ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
                              raw_report);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  return DoAnonymousMmapOrDie(size, mem_type, false, false);
}

// MmapNoAccess and MmapFixedOrDie are used only by sanitizer_allocator.
// Instead of doing exactly what they say, we make MmapNoAccess actually
// just allocate a VMAR to reserve the address space. Then MmapFixedOrDie
// uses that VMAR instead of the root.
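//
// Illustrative call pattern (an assumed sketch of the caller, not code from
// this file; kRegionSize, kOffset, and kChunkSize are hypothetical): the
// allocator reserves the region once, then commits chunks inside it at fixed
// addresses:
//   void *region = MmapNoAccess(kRegionSize);  // reserve the VMAR
//   MmapFixedOrDie(reinterpret_cast<uptr>(region) + kOffset, kChunkSize);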

zx_handle_t allocator_vmar = ZX_HANDLE_INVALID;
uintptr_t allocator_vmar_base;
size_t allocator_vmar_size;

void *MmapNoAccess(uptr size) {
  size = RoundUpTo(size, PAGE_SIZE);
  CHECK_EQ(allocator_vmar, ZX_HANDLE_INVALID);
  uintptr_t base;
  zx_status_t status =
      _zx_vmar_allocate(_zx_vmar_root_self(), 0, size,
                        ZX_VM_FLAG_CAN_MAP_READ | ZX_VM_FLAG_CAN_MAP_WRITE |
                            ZX_VM_FLAG_CAN_MAP_SPECIFIC,
                        &allocator_vmar, &base);
  if (status != ZX_OK)
    ReportMmapFailureAndDie(size, "sanitizer allocator address space",
                            "zx_vmar_allocate", status);

  allocator_vmar_base = base;
  allocator_vmar_size = size;
  return reinterpret_cast<void *>(base);
}

constexpr const char kAllocatorVmoName[] = "sanitizer_allocator";

static void *DoMmapFixedOrDie(uptr fixed_addr, uptr size, bool die_for_nomem) {
  size = RoundUpTo(size, PAGE_SIZE);

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, kAllocatorVmoName, "zx_vmo_create", status);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, kAllocatorVmoName,
                          sizeof(kAllocatorVmoName) - 1);

  DCHECK_GE(fixed_addr, allocator_vmar_base);
  uintptr_t offset = fixed_addr - allocator_vmar_base;
  DCHECK_LE(size, allocator_vmar_size);
  DCHECK_GE(allocator_vmar_size - offset, size);

  uintptr_t addr;
  status = _zx_vmar_map(
      allocator_vmar, offset, vmo, 0, size,
      ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE | ZX_VM_FLAG_SPECIFIC,
      &addr);
  _zx_handle_close(vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
      ReportMmapFailureAndDie(size, kAllocatorVmoName, "zx_vmar_map", status);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
  return DoMmapFixedOrDie(fixed_addr, size, true);
}

void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
  return DoMmapFixedOrDie(fixed_addr, size, false);
}

// This should never be called.
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  UNIMPLEMENTED();
}

void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK_GE(size, PAGE_SIZE);
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
    return nullptr;
  }
  _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
                          internal_strlen(mem_type));

  // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?

  // Map a larger size to get a chunk of address space big enough that
  // it surely contains an aligned region of the requested size. Then
  // overwrite the aligned middle portion with a mapping from the
  // beginning of the VMO, and unmap the excess before and after.
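  //
  // Worked example (illustrative numbers only): with size = 0x10000 and
  // alignment = 0x10000, map_size is 0x20000. If the kernel places the
  // mapping at map_addr = 0x1234f000, addr rounds up to 0x12350000 and
  // end = 0x12360000, so [0x1234f000, 0x12350000) is unmapped as leading
  // excess and [0x12360000, 0x1236f000) as trailing excess, leaving an
  // aligned 0x10000-byte region backed by the start of the VMO.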
  size_t map_size = size + alignment;
  uintptr_t addr;
  status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, map_size,
                        ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
  if (status == ZX_OK) {
    uintptr_t map_addr = addr;
    uintptr_t map_end = map_addr + map_size;
    addr = RoundUpTo(map_addr, alignment);
    uintptr_t end = addr + size;
    if (addr != map_addr) {
      zx_info_vmar_t info;
      status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
                                   sizeof(info), NULL, NULL);
      if (status == ZX_OK) {
        uintptr_t new_addr;
        status =
            _zx_vmar_map(_zx_vmar_root_self(), addr - info.base, vmo, 0, size,
                         ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
                             ZX_VM_FLAG_SPECIFIC_OVERWRITE,
                         &new_addr);
        if (status == ZX_OK) CHECK_EQ(new_addr, addr);
      }
    }
    if (status == ZX_OK && addr != map_addr)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);
    if (status == ZX_OK && end != map_end)
      status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);
  }
  _zx_handle_close(vmo);

  if (status != ZX_OK) {
    if (status != ZX_ERR_NO_MEMORY)
      ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
    return nullptr;
  }

  IncreaseTotalMmap(size);

  return reinterpret_cast<void *>(addr);
}

void UnmapOrDie(void *addr, uptr size) {
  if (!addr || !size) return;
  size = RoundUpTo(size, PAGE_SIZE);

  zx_status_t status = _zx_vmar_unmap(_zx_vmar_root_self(),
                                      reinterpret_cast<uintptr_t>(addr), size);
  if (status != ZX_OK) {
    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, size, size, addr);
    CHECK("unable to unmap" && 0);
  }

  DecreaseTotalMmap(size);
}

// This is used on the shadow mapping, which cannot be changed.
// Zircon doesn't have anything like MADV_DONTNEED.
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}

void DumpProcessMap() {
  UNIMPLEMENTED();  // TODO(mcgrathr): write it
}

bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  // TODO(mcgrathr): Figure out a better way.
  zx_handle_t vmo;
  zx_status_t status = _zx_vmo_create(size, 0, &vmo);
  if (status == ZX_OK) {
    while (size > 0) {
      size_t wrote;
      status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size,
                             &wrote);
      if (status != ZX_OK) break;
      CHECK_GT(wrote, 0);
      CHECK_LE(wrote, size);
      beg += wrote;
      size -= wrote;
    }
    _zx_handle_close(vmo);
  }
  return status == ZX_OK;
}

// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {}
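
// Note on the contract here: the "file" contents are delivered as a VMO via
// __sanitizer_get_configuration rather than read from a filesystem. The
// returned buffer is a read-only, page-rounded mapping of that VMO, not heap
// memory: *buff_size is the mapped size and *read_len is the usable length.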
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len, error_t *errno_p) {
  zx_handle_t vmo;
  zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);
  if (status == ZX_OK) {
    uint64_t vmo_size;
    status = _zx_vmo_get_size(vmo, &vmo_size);
    if (status == ZX_OK) {
      if (vmo_size < max_len) max_len = vmo_size;
      size_t map_size = RoundUpTo(max_len, PAGE_SIZE);
      uintptr_t addr;
      status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, map_size,
                            ZX_VM_FLAG_PERM_READ, &addr);
      if (status == ZX_OK) {
        *buff = reinterpret_cast<char *>(addr);
        *buff_size = map_size;
        *read_len = max_len;
      }
    }
    _zx_handle_close(vmo);
  }
  if (status != ZX_OK && errno_p) *errno_p = status;
  return status == ZX_OK;
}

void RawWrite(const char *buffer) {
  __sanitizer_log_write(buffer, internal_strlen(buffer));
}

void CatastrophicErrorWrite(const char *buffer, uptr length) {
  __sanitizer_log_write(buffer, length);
}

char **StoredArgv;
char **StoredEnviron;

char **GetArgv() { return StoredArgv; }

const char *GetEnv(const char *name) {
  if (StoredEnviron) {
    uptr NameLen = internal_strlen(name);
    for (char **Env = StoredEnviron; *Env != 0; Env++) {
      if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
        return (*Env) + NameLen + 1;
    }
  }
  return nullptr;
}

uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
  const char *argv0 = StoredArgv[0];
  if (!argv0) argv0 = "<UNKNOWN>";
  internal_strncpy(buf, argv0, buf_len);
  return internal_strlen(buf);
}

uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

uptr MainThreadStackBase, MainThreadStackSize;

bool GetRandom(void *buffer, uptr length, bool blocking) {
  CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);
  size_t size;
  CHECK_EQ(_zx_cprng_draw(buffer, length, &size), ZX_OK);
  CHECK_EQ(size, length);
  return true;
}

}  // namespace __sanitizer

using namespace __sanitizer;  // NOLINT

extern "C" {
void __sanitizer_startup_hook(int argc, char **argv, char **envp,
                              void *stack_base, size_t stack_size) {
  __sanitizer::StoredArgv = argv;
  __sanitizer::StoredEnviron = envp;
  __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
  __sanitizer::MainThreadStackSize = stack_size;
}

void __sanitizer_set_report_path(const char *path) {
  // Handle the initialization code in each sanitizer, but no other calls.
  // This setting is never consulted on Fuchsia.
  DCHECK_EQ(path, common_flags()->log_path);
}

void __sanitizer_set_report_fd(void *fd) {
  UNREACHABLE("not available on Fuchsia");
}
}  // extern "C"

#endif  // SANITIZER_FUCHSIA