xref: /netbsd-src/external/gpl3/gcc.old/dist/libsanitizer/sanitizer_common/sanitizer_win.cc (revision c0a68be459da21030695f60d10265c2fc49758f8)
1 //===-- sanitizer_win.cc --------------------------------------------------===//
2 //
3 // This file is distributed under the University of Illinois Open Source
4 // License. See LICENSE.TXT for details.
5 //
6 //===----------------------------------------------------------------------===//
7 //
8 // This file is shared between AddressSanitizer and ThreadSanitizer
9 // run-time libraries and implements windows-specific functions from
10 // sanitizer_libc.h.
11 //===----------------------------------------------------------------------===//
12 
13 #include "sanitizer_platform.h"
14 #if SANITIZER_WINDOWS
15 
16 #define WIN32_LEAN_AND_MEAN
17 #define NOGDI
18 #include <windows.h>
19 #include <io.h>
20 #include <psapi.h>
21 #include <stdlib.h>
22 
23 #include "sanitizer_common.h"
24 #include "sanitizer_file.h"
25 #include "sanitizer_libc.h"
26 #include "sanitizer_mutex.h"
27 #include "sanitizer_placement_new.h"
28 #include "sanitizer_win_defs.h"
29 
30 #if defined(PSAPI_VERSION) && PSAPI_VERSION == 1
31 #pragma comment(lib, "psapi")
32 #endif
33 
34 // A macro to tell the compiler that this part of the code cannot be reached,
35 // if the compiler supports this feature. Since we're using this in
36 // code that is called when terminating the process, the expansion of the
37 // macro should not terminate the process to avoid infinite recursion.
38 #if defined(__clang__)
39 # define BUILTIN_UNREACHABLE() __builtin_unreachable()
40 #elif defined(__GNUC__) && \
41     (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
42 # define BUILTIN_UNREACHABLE() __builtin_unreachable()
43 #elif defined(_MSC_VER)
44 # define BUILTIN_UNREACHABLE() __assume(0)
45 #else
46 # define BUILTIN_UNREACHABLE()
47 #endif
48 
49 namespace __sanitizer {
50 
51 #include "sanitizer_syscall_generic.inc"
52 
53 // --------------------- sanitizer_common.h
GetPageSize()54 uptr GetPageSize() {
55   SYSTEM_INFO si;
56   GetSystemInfo(&si);
57   return si.dwPageSize;
58 }
59 
GetMmapGranularity()60 uptr GetMmapGranularity() {
61   SYSTEM_INFO si;
62   GetSystemInfo(&si);
63   return si.dwAllocationGranularity;
64 }
65 
GetMaxUserVirtualAddress()66 uptr GetMaxUserVirtualAddress() {
67   SYSTEM_INFO si;
68   GetSystemInfo(&si);
69   return (uptr)si.lpMaximumApplicationAddress;
70 }
71 
// On Windows the sanitizer runtime only deals with user-mode addresses,
// so the overall maximum is the same as the user-mode maximum.
uptr GetMaxVirtualAddress() {
  return GetMaxUserVirtualAddress();
}
75 
// Returns true if |filename| names an existing file or directory.
// NOTE(review): uses the ANSI API, so non-ANSI (Unicode) paths are not
// handled — consistent with the FIXME in OpenFile below.
bool FileExists(const char *filename) {
  return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES;
}
79 
// Returns the id of the current process.
uptr internal_getpid() {
  return GetProcessId(GetCurrentProcess());
}
83 
// dlinfo() has no Windows equivalent; aborts if ever called.
int internal_dlinfo(void *handle, int request, void *p) {
  UNIMPLEMENTED();
}
87 
// In contrast to POSIX, on Windows GetCurrentThreadId()
// returns a system-unique identifier.
tid_t GetTid() {
  return GetCurrentThreadId();
}
93 
// The thread id doubles as the "thread self" handle on Windows.
uptr GetThreadSelf() {
  return GetTid();
}
97 
#if !SANITIZER_GO
// Computes the bounds of the current thread's stack by querying the
// memory region that contains a local variable.
// |at_initialization| is unused on Windows.
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  CHECK(stack_top);
  CHECK(stack_bottom);
  MEMORY_BASIC_INFORMATION mbi;
  CHECK_NE(VirtualQuery(&mbi /* on stack */, &mbi, sizeof(mbi)), 0);
  // FIXME: is it possible for the stack to not be a single allocation?
  // Are these values what ASan expects to get (reserved, not committed;
  // including stack guard page) ?
  *stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize;
  *stack_bottom = (uptr)mbi.AllocationBase;
}
#endif  // #if !SANITIZER_GO
112 
// Reserves and commits |size| bytes of read/write memory; dies with a
// report (tagged with |mem_type|) on failure.
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (rv == 0)
    ReportMmapFailureAndDie(size, mem_type, "allocate",
                            GetLastError(), raw_report);
  return rv;
}
120 
// Releases memory previously obtained from VirtualAlloc; dies on failure.
// A null |addr| or zero |size| is a no-op.
void UnmapOrDie(void *addr, uptr size) {
  if (!size || !addr)
    return;

  MEMORY_BASIC_INFORMATION mbi;
  CHECK(VirtualQuery(addr, &mbi, sizeof(mbi)));

  // MEM_RELEASE can only be used to unmap whole regions previously mapped with
  // VirtualAlloc. So we first try MEM_RELEASE since it is better, and if that
  // fails try MEM_DECOMMIT.
  if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
    if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
      Report("ERROR: %s failed to "
             "deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n",
             SanitizerToolName, size, size, addr, GetLastError());
      CHECK("unable to unmap" && 0);
    }
  }
}
140 
// Common failure path for the *OnFatalError allocators: returns nullptr
// if the last error was an out-of-memory condition, otherwise dies with
// a report (ReportMmapFailureAndDie does not return).
static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type,
                                     const char *mmap_type) {
  error_t last_error = GetLastError();
  if (last_error == ERROR_NOT_ENOUGH_MEMORY)
    return nullptr;
  ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error);
}
148 
// Like MmapOrDie, but returns nullptr (instead of dying) when the failure
// is an out-of-memory condition.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (rv == 0)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
  return rv;
}
155 
// We want to map a chunk of address space aligned to 'alignment'.
// Returns nullptr on OOM; dies on any other failure.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  // Windows will align our allocations to at least 64K.
  alignment = Max(alignment, GetMmapGranularity());

  // Fast path: hope the plain allocation happens to be aligned.
  uptr mapped_addr =
      (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (!mapped_addr)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

  // If we got it right on the first try, return. Otherwise, unmap it and go to
  // the slow path.
  if (IsAligned(mapped_addr, alignment))
    return (void*)mapped_addr;
  if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
    ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());

  // If we didn't get an aligned address, overallocate, find an aligned address,
  // unmap, and try to allocate at that aligned address.
  // NOTE: between the VirtualFree and the targeted VirtualAlloc below,
  // another thread can grab the range, so this is inherently racy; the
  // retry loop compensates for that.
  int retries = 0;
  const int kMaxRetries = 10;
  for (; retries < kMaxRetries &&
         (mapped_addr == 0 || !IsAligned(mapped_addr, alignment));
       retries++) {
    // Overallocate size + alignment bytes.
    mapped_addr =
        (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
    if (!mapped_addr)
      return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

    // Find the aligned address.
    uptr aligned_addr = RoundUpTo(mapped_addr, alignment);

    // Free the overallocation.
    if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
      ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());

    // Attempt to allocate exactly the number of bytes we need at the aligned
    // address. This may fail for a number of reasons, in which case we continue
    // the loop.
    mapped_addr = (uptr)VirtualAlloc((void *)aligned_addr, size,
                                     MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  }

  // Fail if we can't make this work quickly.
  if (retries == kMaxRetries && mapped_addr == 0)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

  return (void *)mapped_addr;
}
210 
MmapFixedNoReserve(uptr fixed_addr,uptr size,const char * name)211 bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
212   // FIXME: is this really "NoReserve"? On Win32 this does not matter much,
213   // but on Win64 it does.
214   (void)name;  // unsupported
215 #if !SANITIZER_GO && SANITIZER_WINDOWS64
216   // On asan/Windows64, use MEM_COMMIT would result in error
217   // 1455:ERROR_COMMITMENT_LIMIT.
218   // Asan uses exception handler to commit page on demand.
219   void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_READWRITE);
220 #else
221   void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT,
222                          PAGE_READWRITE);
223 #endif
224   if (p == 0) {
225     Report("ERROR: %s failed to "
226            "allocate %p (%zd) bytes at %p (error code: %d)\n",
227            SanitizerToolName, size, size, fixed_addr, GetLastError());
228     return false;
229   }
230   return true;
231 }
232 
// Memory space mapped by 'MmapFixedOrDie' must have been reserved by
// 'MmapFixedNoAccess'.
// Commits |size| bytes at |fixed_addr|; dies with a report on failure.
void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
      MEM_COMMIT, PAGE_READWRITE);
  if (p == 0) {
    char mem_type[30];
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                      fixed_addr);
    ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError());
  }
  return p;
}
246 
// Uses fixed_addr for now.
// Will use offset instead once we've implemented this function for real.
// Commits part of the reserved range; returns nullptr-equivalent on OOM.
uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size) {
  return reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(fixed_addr, size));
}
252 
// Like Map(), but dies instead of returning failure.
uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr size) {
  return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size));
}
256 
// Unmaps the reserved range. Partial unmapping is not supported on
// Windows, so the caller must pass exactly the full range.
void ReservedAddressRange::Unmap(uptr addr, uptr size) {
  // Only unmap if it covers the entire range.
  CHECK((addr == reinterpret_cast<uptr>(base_)) && (size == size_));
  // We unmap the whole range, just null out the base.
  base_ = nullptr;
  size_ = 0;
  UnmapOrDie(reinterpret_cast<void*>(addr), size);
}
265 
// Like MmapFixedOrDie, but returns nullptr on an out-of-memory failure
// instead of dying.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
      MEM_COMMIT, PAGE_READWRITE);
  if (p == 0) {
    char mem_type[30];
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                      fixed_addr);
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
  }
  return p;
}
277 
// "NoReserve" is not actually implemented; falls back to a full mapping.
void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  // FIXME: make this really NoReserve?
  return MmapOrDie(size, mem_type);
}
282 
// Reserves (PAGE_NOACCESS) |size| bytes, at |fixed_addr| if nonzero,
// otherwise wherever the OS chooses. Returns the base address.
uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
  base_ = fixed_addr ? MmapFixedNoAccess(fixed_addr, size) : MmapNoAccess(size);
  size_ = size;
  name_ = name;
  (void)os_handle_;  // unsupported
  return reinterpret_cast<uptr>(base_);
}
290 
291 
MmapFixedNoAccess(uptr fixed_addr,uptr size,const char * name)292 void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
293   (void)name; // unsupported
294   void *res = VirtualAlloc((LPVOID)fixed_addr, size,
295                            MEM_RESERVE, PAGE_NOACCESS);
296   if (res == 0)
297     Report("WARNING: %s failed to "
298            "mprotect %p (%zd) bytes at %p (error code: %d)\n",
299            SanitizerToolName, size, size, fixed_addr, GetLastError());
300   return res;
301 }
302 
MmapNoAccess(uptr size)303 void *MmapNoAccess(uptr size) {
304   void *res = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_NOACCESS);
305   if (res == 0)
306     Report("WARNING: %s failed to "
307            "mprotect %p (%zd) bytes (error code: %d)\n",
308            SanitizerToolName, size, size, GetLastError());
309   return res;
310 }
311 
// Makes [addr, addr+size) inaccessible; returns false on failure.
bool MprotectNoAccess(uptr addr, uptr size) {
  DWORD old_protection;
  return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);
}
316 
// No-op on Windows.
void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
  // This is almost useless on 32-bits.
  // FIXME: add madvise-analog when we move to 64-bits.
}
321 
// No-op on Windows; reports success.
bool NoHugePagesInRegion(uptr addr, uptr size) {
  // FIXME: probably similar to ReleaseMemoryToOS.
  return true;
}
326 
// No-op on Windows; reports success.
bool DontDumpShadowMemory(uptr addr, uptr length) {
  // This is almost useless on 32-bits.
  // FIXME: add madvise-analog when we move to 64-bits.
  return true;
}
332 
// Walks the address space with VirtualQuery looking for the first free
// region that can hold |size| bytes at |alignment| after |left_padding|.
// Returns 0 if none is found.
// NOTE(review): |largest_gap_found| and |max_occupied_addr| are never
// written by this implementation — callers must not rely on them here.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found,
                              uptr *max_occupied_addr) {
  uptr address = 0;
  while (true) {
    MEMORY_BASIC_INFORMATION info;
    // VirtualQuery fails once |address| passes the end of user space,
    // which terminates the scan.
    if (!::VirtualQuery((void*)address, &info, sizeof(info)))
      return 0;

    if (info.State == MEM_FREE) {
      uptr shadow_address = RoundUpTo((uptr)info.BaseAddress + left_padding,
                                      alignment);
      if (shadow_address + size < (uptr)info.BaseAddress + info.RegionSize)
        return shadow_address;
    }

    // Move to the next region.
    address = (uptr)info.BaseAddress + info.RegionSize;
  }
  return 0;  // Unreachable: the loop only exits via the returns above.
}
354 
// Returns true if [range_start, range_end) lies within a single
// PAGE_NOACCESS region (i.e. was reserved but not made accessible).
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
  MEMORY_BASIC_INFORMATION mbi;
  CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi)));
  return mbi.Protect == PAGE_NOACCESS &&
         (uptr)mbi.BaseAddress + mbi.RegionSize >= range_end;
}
361 
// Not implemented on Windows.
void *MapFileToMemory(const char *file_name, uptr *buff_size) {
  UNIMPLEMENTED();
}
365 
// Not implemented on Windows.
void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
  UNIMPLEMENTED();
}
369 
// Storage for GetEnv()'s cache: Windows has no stable environ pointers to
// hand out, so queried values are copied into this fixed-size table.
static const int kMaxEnvNameLength = 128;
// Maximum value length accepted from GetEnvironmentVariableA.
static const DWORD kMaxEnvValueLength = 32767;

namespace {

struct EnvVariable {
  char name[kMaxEnvNameLength];
  char value[kMaxEnvValueLength];
};

}  // namespace

// At most kEnvVariables distinct names may ever be queried; exceeding
// this limit trips a CHECK in GetEnv().
static const int kEnvVariables = 5;
static EnvVariable env_vars[kEnvVariables];
static int num_env_vars;
385 
// Returns the value of environment variable |name|, or 0 if unset.
// NOTE(review): no locking around env_vars/num_env_vars — presumably only
// called during (single-threaded) startup; confirm before wider use.
const char *GetEnv(const char *name) {
  // Note: this implementation caches the values of the environment variables
  // and limits their quantity.
  for (int i = 0; i < num_env_vars; i++) {
    if (0 == internal_strcmp(name, env_vars[i].name))
      return env_vars[i].value;
  }
  CHECK_LT(num_env_vars, kEnvVariables);
  // rv is the value length excluding the terminator on success, or 0 /
  // a value >= the buffer size on failure or truncation.
  DWORD rv = GetEnvironmentVariableA(name, env_vars[num_env_vars].value,
                                     kMaxEnvValueLength);
  if (rv > 0 && rv < kMaxEnvValueLength) {
    CHECK_LT(internal_strlen(name), kMaxEnvNameLength);
    internal_strncpy(env_vars[num_env_vars].name, name, kMaxEnvNameLength);
    num_env_vars++;
    return env_vars[num_env_vars - 1].value;
  }
  return 0;
}
404 
// Not implemented on Windows.
const char *GetPwd() {
  UNIMPLEMENTED();
}
408 
// Not implemented on Windows (no POSIX uid concept).
u32 GetUid() {
  UNIMPLEMENTED();
}
412 
413 namespace {
414 struct ModuleInfo {
415   const char *filepath;
416   uptr base_address;
417   uptr end_address;
418 };
419 
420 #if !SANITIZER_GO
CompareModulesBase(const void * pl,const void * pr)421 int CompareModulesBase(const void *pl, const void *pr) {
422   const ModuleInfo *l = (const ModuleInfo *)pl, *r = (const ModuleInfo *)pr;
423   if (l->base_address < r->base_address)
424     return -1;
425   return l->base_address > r->base_address;
426 }
427 #endif
428 }  // namespace
429 
#if !SANITIZER_GO
// Prints the list of loaded modules (sorted by base address) to the report
// stream, one "base-end path" line per module.
void DumpProcessMap() {
  Report("Dumping process modules:\n");
  ListOfModules modules;
  modules.init();
  uptr num_modules = modules.size();

  InternalMmapVector<ModuleInfo> module_infos(num_modules);
  for (size_t i = 0; i < num_modules; ++i) {
    module_infos[i].filepath = modules[i].full_name();
    module_infos[i].base_address = modules[i].ranges().front()->beg;
    module_infos[i].end_address = modules[i].ranges().back()->end;
  }
  qsort(module_infos.data(), num_modules, sizeof(ModuleInfo),
        CompareModulesBase);

  for (size_t i = 0; i < num_modules; ++i) {
    const ModuleInfo &mi = module_infos[i];
    if (mi.end_address != 0) {
      Printf("\t%p-%p %s\n", mi.base_address, mi.end_address,
             mi.filepath[0] ? mi.filepath : "[no name]");
    } else if (mi.filepath[0]) {
      // "??\?" avoids accidentally forming a trigraph in the literal.
      Printf("\t??\?-??? %s\n", mi.filepath);
    } else {
      Printf("\t???\n");
    }
  }
}
#endif
459 
// No-op on Windows (module map printing is handled by DumpProcessMap).
void PrintModuleMap() { }
461 
// No-op on Windows (no POSIX-style core dumps to disable).
void DisableCoreDumperIfNecessary() {
  // Do nothing.
}
465 
// Not implemented on Windows.
void ReExec() {
  UNIMPLEMENTED();
}
469 
// No sandboxing preparation needed on Windows.
void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
471 
// Not implemented on Windows (no rlimit concept).
bool StackSizeIsUnlimited() {
  UNIMPLEMENTED();
}
475 
// Not implemented on Windows (no rlimit concept).
void SetStackSizeLimitInBytes(uptr limit) {
  UNIMPLEMENTED();
}
479 
// Not implemented on Windows (no rlimit concept).
bool AddressSpaceIsUnlimited() {
  UNIMPLEMENTED();
}
483 
// Not implemented on Windows (no rlimit concept).
void SetAddressSpaceUnlimited() {
  UNIMPLEMENTED();
}
487 
// Windows accepts both the backslash and the forward slash as path
// separators.
bool IsPathSeparator(const char c) {
  switch (c) {
    case '\\':
    case '/':
      return true;
    default:
      return false;
  }
}
491 
// Not implemented on Windows.
bool IsAbsolutePath(const char *path) {
  UNIMPLEMENTED();
}
495 
// Sleeps for the given number of seconds (Sleep takes milliseconds).
void SleepForSeconds(int seconds) {
  Sleep(seconds * 1000);
}
499 
// Sleeps for the given number of milliseconds.
void SleepForMillis(int millis) {
  Sleep(millis);
}
503 
// Returns a high-resolution timestamp in nanoseconds, derived from the
// performance counter. The counter frequency is queried once and cached.
u64 NanoTime() {
  static LARGE_INTEGER frequency = {};
  LARGE_INTEGER counter;
  if (UNLIKELY(frequency.QuadPart == 0)) {
    QueryPerformanceFrequency(&frequency);
    CHECK_NE(frequency.QuadPart, 0);
  }
  QueryPerformanceCounter(&counter);
  // Scale ticks to nanoseconds. NOTE(review): multiplying before dividing
  // preserves precision but could overflow the 64-bit counter after very
  // long uptimes — presumably acceptable here; confirm if this matters.
  counter.QuadPart *= 1000ULL * 1000000ULL;
  counter.QuadPart /= frequency.QuadPart;
  return counter.QuadPart;
}
516 
// The performance counter is monotonic, so NanoTime() already qualifies.
u64 MonotonicNanoTime() { return NanoTime(); }
518 
// Terminates the process with exit code 3 (the MSVC abort() convention).
void Abort() {
  internal__exit(3);
}
522 
523 #if !SANITIZER_GO
// Read the file to extract the ImageBase field from the PE header. If ASLR is
// disabled and this virtual address is available, the loader will typically
// load the image at this address. Therefore, we call it the preferred base. Any
// addresses in the DWARF typically assume that the object has been loaded at
// this address.
// Returns 0 if the file cannot be opened or is not a valid PE image.
static uptr GetPreferredBase(const char *modname) {
  fd_t fd = OpenFile(modname, RdOnly, nullptr);
  if (fd == kInvalidFd)
    return 0;
  FileCloser closer(fd);  // RAII: closes fd on every early return below.

  // Read just the DOS header.
  IMAGE_DOS_HEADER dos_header;
  uptr bytes_read;
  if (!ReadFromFile(fd, &dos_header, sizeof(dos_header), &bytes_read) ||
      bytes_read != sizeof(dos_header))
    return 0;

  // The file should start with the right signature ("MZ").
  if (dos_header.e_magic != IMAGE_DOS_SIGNATURE)
    return 0;

  // The layout at e_lfanew is:
  // "PE\0\0"
  // IMAGE_FILE_HEADER
  // IMAGE_OPTIONAL_HEADER
  // Seek to e_lfanew and read all that data.
  char buf[4 + sizeof(IMAGE_FILE_HEADER) + sizeof(IMAGE_OPTIONAL_HEADER)];
  if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) ==
      INVALID_SET_FILE_POINTER)
    return 0;
  if (!ReadFromFile(fd, &buf[0], sizeof(buf), &bytes_read) ||
      bytes_read != sizeof(buf))
    return 0;

  // Check for "PE\0\0" before the PE header.
  char *pe_sig = &buf[0];
  if (internal_memcmp(pe_sig, "PE\0\0", 4) != 0)
    return 0;

  // Skip over IMAGE_FILE_HEADER. We could do more validation here if we wanted.
  IMAGE_OPTIONAL_HEADER *pe_header =
      (IMAGE_OPTIONAL_HEADER *)(pe_sig + 4 + sizeof(IMAGE_FILE_HEADER));

  // Check for more magic in the PE header.
  if (pe_header->Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC)
    return 0;

  // Finally, return the ImageBase.
  return (uptr)pe_header->ImageBase;
}
575 
// Populates modules_ with every module loaded in the current process,
// using PSAPI enumeration plus the PE preferred base for address
// adjustment.
void ListOfModules::init() {
  clearOrInit();
  HANDLE cur_process = GetCurrentProcess();

  // Query the list of modules.  Start by assuming there are no more than 256
  // modules and retry if that's not sufficient.
  HMODULE *hmodules = 0;
  uptr modules_buffer_size = sizeof(HMODULE) * 256;
  DWORD bytes_required;
  while (!hmodules) {
    hmodules = (HMODULE *)MmapOrDie(modules_buffer_size, __FUNCTION__);
    CHECK(EnumProcessModules(cur_process, hmodules, modules_buffer_size,
                             &bytes_required));
    if (bytes_required > modules_buffer_size) {
      // Either there turned out to be more than 256 hmodules, or new hmodules
      // could have loaded since the last try.  Retry.
      UnmapOrDie(hmodules, modules_buffer_size);
      hmodules = 0;
      modules_buffer_size = bytes_required;
    }
  }

  // |num_modules| is the number of modules actually present,
  size_t num_modules = bytes_required / sizeof(HMODULE);
  for (size_t i = 0; i < num_modules; ++i) {
    HMODULE handle = hmodules[i];
    MODULEINFO mi;
    // Skip modules we cannot query (e.g. ones unloaded mid-enumeration).
    if (!GetModuleInformation(cur_process, handle, &mi, sizeof(mi)))
      continue;

    // Get the UTF-16 path and convert to UTF-8.
    wchar_t modname_utf16[kMaxPathLength];
    int modname_utf16_len =
        GetModuleFileNameW(handle, modname_utf16, kMaxPathLength);
    if (modname_utf16_len == 0)
      modname_utf16[0] = '\0';
    char module_name[kMaxPathLength];
    int module_name_len =
        ::WideCharToMultiByte(CP_UTF8, 0, modname_utf16, modname_utf16_len + 1,
                              &module_name[0], kMaxPathLength, NULL, NULL);
    module_name[module_name_len] = '\0';

    uptr base_address = (uptr)mi.lpBaseOfDll;
    uptr end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage;

    // Adjust the base address of the module so that we get a VA instead of an
    // RVA when computing the module offset. This helps llvm-symbolizer find the
    // right DWARF CU. In the common case that the image is loaded at it's
    // preferred address, we will now print normal virtual addresses.
    uptr preferred_base = GetPreferredBase(&module_name[0]);
    uptr adjusted_base = base_address - preferred_base;

    LoadedModule cur_module;
    cur_module.set(module_name, adjusted_base);
    // We add the whole module as one single address range.
    cur_module.addAddressRange(base_address, end_address, /*executable*/ true,
                               /*writable*/ true);
    modules_.push_back(cur_module);
  }
  UnmapOrDie(hmodules, modules_buffer_size);
}
637 
// No fallback enumeration mechanism on Windows; just clear the list.
void ListOfModules::fallbackInit() { clear(); }
639 
// We can't use atexit() directly at __asan_init time as the CRT is not fully
// initialized at this point.  Place the functions into a vector and use
// atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers).
InternalMmapVectorNoCtor<void (*)(void)> atexit_functions;

// Queues |function| for later registration with the CRT's atexit().
// Always reports success (returns 0).
int Atexit(void (*function)(void)) {
  atexit_functions.push_back(function);
  return 0;
}
649 
// Registers every queued Atexit() callback with the (now-initialized)
// CRT. Returns nonzero if any atexit() registration failed.
static int RunAtexit() {
  int ret = 0;
  for (uptr i = 0; i < atexit_functions.size(); ++i) {
    ret |= atexit(atexit_functions[i]);
  }
  return ret;
}

// Run RunAtexit during CRT startup: .CRT$XID initializers execute after
// .CRT$XIC, by which point atexit() is usable.
#pragma section(".CRT$XID", long, read)  // NOLINT
__declspec(allocate(".CRT$XID")) int (*__run_atexit)() = RunAtexit;
660 #endif
661 
662 // ------------------ sanitizer_libc.h
// Opens |filename| for reading or writing; returns kInvalidFd on failure
// and stores the Windows error code in *last_error if provided.
fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *last_error) {
  // FIXME: Use the wide variants to handle Unicode filenames.
  fd_t res;
  if (mode == RdOnly) {
    res = CreateFileA(filename, GENERIC_READ,
                      FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
                      nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
  } else if (mode == WrOnly) {
    res = CreateFileA(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
                      FILE_ATTRIBUTE_NORMAL, nullptr);
  } else {
    UNIMPLEMENTED();
  }
  // The returned handle must not collide with the pseudo-fds used for
  // stdout/stderr (see WriteToFile).
  CHECK(res != kStdoutFd || kStdoutFd == kInvalidFd);
  CHECK(res != kStderrFd || kStderrFd == kInvalidFd);
  if (res == kInvalidFd && last_error)
    *last_error = GetLastError();
  return res;
}
682 
// Closes a handle returned by OpenFile.
void CloseFile(fd_t fd) {
  CloseHandle(fd);
}
686 
// Reads up to |buff_size| bytes from |fd| into |buff|. On success stores
// the byte count in *bytes_read (if non-null); on failure stores the
// Windows error code in *error_p (if non-null) and returns false.
bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
                  error_t *error_p) {
  CHECK(fd != kInvalidFd);

  // bytes_read can't be passed directly to ReadFile:
  // uptr is unsigned long long on 64-bit Windows.
  unsigned long num_read_long;

  bool success = ::ReadFile(fd, buff, buff_size, &num_read_long, nullptr);
  if (!success && error_p)
    *error_p = GetLastError();
  if (bytes_read)
    *bytes_read = num_read_long;
  return success;
}
702 
// Colored output is not implemented on Windows.
bool SupportsColoredOutput(fd_t fd) {
  // FIXME: support colored output.
  return false;
}
707 
// Writes |buff_size| bytes from |buff| to |fd|. The pseudo-fds kStdoutFd
// and kStderrFd are translated to the process's standard handles. Returns
// false on failure, with the error code in *error_p (if non-null).
bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
                 error_t *error_p) {
  CHECK(fd != kInvalidFd);

  // Handle null optional parameters.
  error_t dummy_error;
  error_p = error_p ? error_p : &dummy_error;
  uptr dummy_bytes_written;
  bytes_written = bytes_written ? bytes_written : &dummy_bytes_written;

  // Initialize output parameters in case we fail.
  *error_p = 0;
  *bytes_written = 0;

  // Map the conventional Unix fds 1 and 2 to Windows handles. They might be
  // closed, in which case this will fail.
  if (fd == kStdoutFd || fd == kStderrFd) {
    fd = GetStdHandle(fd == kStdoutFd ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE);
    if (fd == 0) {
      *error_p = ERROR_INVALID_HANDLE;
      return false;
    }
  }

  // WriteFile needs a DWORD out-parameter; uptr is wider on Win64.
  DWORD bytes_written_32;
  if (!WriteFile(fd, buff, buff_size, &bytes_written_32, 0)) {
    *error_p = GetLastError();
    return false;
  } else {
    *bytes_written = bytes_written_32;
    return true;
  }
}
741 
// Not implemented on Windows.
bool RenameFile(const char *oldpath, const char *newpath, error_t *error_p) {
  UNIMPLEMENTED();
}
745 
// Yields the remainder of the current time slice (Sleep(0) is the
// Windows idiom for sched_yield).
uptr internal_sched_yield() {
  Sleep(0);
  return 0;
}
750 
// Terminates the process immediately with |exitcode|, bypassing CRT
// finalizers.
void internal__exit(int exitcode) {
  // ExitProcess runs some finalizers, so use TerminateProcess to avoid that.
  // The debugger doesn't stop on TerminateProcess like it does on ExitProcess,
  // so add our own breakpoint here.
  if (::IsDebuggerPresent())
    __debugbreak();
  TerminateProcess(GetCurrentProcess(), exitcode);
  BUILTIN_UNREACHABLE();
}
760 
// Not implemented on Windows.
uptr internal_ftruncate(fd_t fd, uptr size) {
  UNIMPLEMENTED();
}
764 
// Returns the process's working-set size in bytes (the closest Windows
// analog of resident set size), or 0 if the query fails.
uptr GetRSS() {
  PROCESS_MEMORY_COUNTERS counters;
  if (!GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters)))
    return 0;
  return counters.WorkingSetSize;
}
771 
// Internal thread creation/joining is not supported on Windows; the
// start stub returns a null "handle" and join is a no-op.
void *internal_start_thread(void (*func)(void *arg), void *arg) { return 0; }
void internal_join_thread(void *th) { }
774 
// ---------------------- BlockingMutex ---------------- {{{1

// Implemented on top of SRWLOCK, which fits inside opaque_storage_ and
// whose unlocked state is all-zero bytes, so memset is valid init.
BlockingMutex::BlockingMutex() {
  CHECK(sizeof(SRWLOCK) <= sizeof(opaque_storage_));
  internal_memset(this, 0, sizeof(*this));
}
781 
// Acquires the lock exclusively and records the owning thread for
// CheckLocked().
void BlockingMutex::Lock() {
  AcquireSRWLockExclusive((PSRWLOCK)opaque_storage_);
  CHECK_EQ(owner_, 0);
  owner_ = GetThreadSelf();
}
787 
// Releases the lock; must be called by the owning thread.
void BlockingMutex::Unlock() {
  CheckLocked();
  owner_ = 0;  // Must be cleared before the release to avoid a race.
  ReleaseSRWLockExclusive((PSRWLOCK)opaque_storage_);
}
793 
// Asserts that the calling thread currently holds the lock.
void BlockingMutex::CheckLocked() {
  CHECK_EQ(owner_, GetThreadSelf());
}
797 
// TLS introspection is not implemented on Windows; report zero size.
uptr GetTlsSize() {
  return 0;
}
801 
// Nothing to initialize for TLS on Windows.
void InitTlsSize() {
}
804 
// Reports the current thread's stack range; TLS range is always reported
// as empty on Windows (see GetTlsSize above).
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size) {
#if SANITIZER_GO
  *stk_addr = 0;
  *stk_size = 0;
  *tls_addr = 0;
  *tls_size = 0;
#else
  uptr stack_top, stack_bottom;
  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
  *stk_addr = stack_bottom;
  *stk_size = stack_top - stack_bottom;
  *tls_addr = 0;
  *tls_size = 0;
#endif
}
821 
// Writes a report chunk to the report fd, falling back to the debugger
// output stream if the fd is unwritable.
void ReportFile::Write(const char *buffer, uptr length) {
  SpinMutexLock l(mu);
  ReopenIfNecessary();
  if (!WriteToFile(fd, buffer, length)) {
    // stderr may be closed, but we may be able to print to the debugger
    // instead.  This is the case when launching a program from Visual Studio,
    // and the following routine should write to its console.
    OutputDebugStringA(buffer);
  }
}
832 
// No-op: Windows has no POSIX signal stacks.
void SetAlternateSignalStack() {
  // FIXME: Decide what to do on Windows.
}
836 
// No-op: Windows has no POSIX signal stacks.
void UnsetAlternateSignalStack() {
  // FIXME: Decide what to do on Windows.
}
840 
// No-op: deadly "signals" are handled via SEH exceptions elsewhere.
void InstallDeadlySignalHandlers(SignalHandlerType handler) {
  (void)handler;
  // FIXME: Decide what to do on Windows.
}
845 
GetHandleSignalMode(int signum)846 HandleSignalMode GetHandleSignalMode(int signum) {
847   // FIXME: Decide what to do on Windows.
848   return kHandleSignalNo;
849 }
850 
// Check based on flags if we should handle this exception.
// Each Windows SEH exception code is mapped to the runtime flag that controls
// the analogous POSIX signal family; codes not listed are never handled.
bool IsHandledDeadlyException(DWORD exceptionCode) {
  switch (exceptionCode) {
    // Memory-access and alignment faults: gated by handle_segv.
    case EXCEPTION_ACCESS_VIOLATION:
    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
    case EXCEPTION_STACK_OVERFLOW:
    case EXCEPTION_DATATYPE_MISALIGNMENT:
    case EXCEPTION_IN_PAGE_ERROR:
      return common_flags()->handle_segv;
    // Invalid/privileged instructions and breakpoints: gated by handle_sigill.
    case EXCEPTION_ILLEGAL_INSTRUCTION:
    case EXCEPTION_PRIV_INSTRUCTION:
    case EXCEPTION_BREAKPOINT:
      return common_flags()->handle_sigill;
    // Floating-point and integer arithmetic faults: gated by handle_sigfpe.
    case EXCEPTION_FLT_DENORMAL_OPERAND:
    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
    case EXCEPTION_FLT_INEXACT_RESULT:
    case EXCEPTION_FLT_INVALID_OPERATION:
    case EXCEPTION_FLT_OVERFLOW:
    case EXCEPTION_FLT_STACK_CHECK:
    case EXCEPTION_FLT_UNDERFLOW:
    case EXCEPTION_INT_DIVIDE_BY_ZERO:
    case EXCEPTION_INT_OVERFLOW:
      return common_flags()->handle_sigfpe;
  }
  return false;
}
877 
IsAccessibleMemoryRange(uptr beg,uptr size)878 bool IsAccessibleMemoryRange(uptr beg, uptr size) {
879   SYSTEM_INFO si;
880   GetNativeSystemInfo(&si);
881   uptr page_size = si.dwPageSize;
882   uptr page_mask = ~(page_size - 1);
883 
884   for (uptr page = beg & page_mask, end = (beg + size - 1) & page_mask;
885        page <= end;) {
886     MEMORY_BASIC_INFORMATION info;
887     if (VirtualQuery((LPCVOID)page, &info, sizeof(info)) != sizeof(info))
888       return false;
889 
890     if (info.Protect == 0 || info.Protect == PAGE_NOACCESS ||
891         info.Protect == PAGE_EXECUTE)
892       return false;
893 
894     if (info.RegionSize == 0)
895       return false;
896 
897     page += info.RegionSize;
898   }
899 
900   return true;
901 }
902 
IsStackOverflow() const903 bool SignalContext::IsStackOverflow() const {
904   return (DWORD)GetType() == EXCEPTION_STACK_OVERFLOW;
905 }
906 
// Populates pc/sp/bp from the captured Windows exception and context records.
// Assumes an x86/x86-64 CONTEXT layout (Rbp/Rsp vs. Ebp/Esp).
void SignalContext::InitPcSpBp() {
  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
  CONTEXT *context_record = (CONTEXT *)context;

  // The faulting instruction address comes from the exception record; the
  // register state comes from the thread context captured alongside it.
  pc = (uptr)exception_record->ExceptionAddress;
#ifdef _WIN64
  bp = (uptr)context_record->Rbp;
  sp = (uptr)context_record->Rsp;
#else
  bp = (uptr)context_record->Ebp;
  sp = (uptr)context_record->Esp;
#endif
}
920 
GetAddress() const921 uptr SignalContext::GetAddress() const {
922   EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
923   return exception_record->ExceptionInformation[1];
924 }
925 
IsMemoryAccess() const926 bool SignalContext::IsMemoryAccess() const {
927   return GetWriteFlag() != SignalContext::UNKNOWN;
928 }
929 
GetWriteFlag() const930 SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
931   EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
932   // The contents of this array are documented at
933   // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363082(v=vs.85).aspx
934   // The first element indicates read as 0, write as 1, or execute as 8.  The
935   // second element is the faulting address.
936   switch (exception_record->ExceptionInformation[0]) {
937     case 0:
938       return SignalContext::READ;
939     case 1:
940       return SignalContext::WRITE;
941     case 8:
942       return SignalContext::UNKNOWN;
943   }
944   return SignalContext::UNKNOWN;
945 }
946 
DumpAllRegisters(void * context)947 void SignalContext::DumpAllRegisters(void *context) {
948   // FIXME: Implement this.
949 }
950 
GetType() const951 int SignalContext::GetType() const {
952   return static_cast<const EXCEPTION_RECORD *>(siginfo)->ExceptionCode;
953 }
954 
// Returns a short human-readable name for the exception carried by this
// context, or "unknown exception" for codes that IsHandledDeadlyException
// does not classify.
const char *SignalContext::Describe() const {
  unsigned code = GetType();
  // Get the string description of the exception if this is a known deadly
  // exception.
  switch (code) {
    case EXCEPTION_ACCESS_VIOLATION:
      return "access-violation";
    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
      return "array-bounds-exceeded";
    case EXCEPTION_STACK_OVERFLOW:
      return "stack-overflow";
    case EXCEPTION_DATATYPE_MISALIGNMENT:
      return "datatype-misalignment";
    case EXCEPTION_IN_PAGE_ERROR:
      return "in-page-error";
    case EXCEPTION_ILLEGAL_INSTRUCTION:
      return "illegal-instruction";
    case EXCEPTION_PRIV_INSTRUCTION:
      return "priv-instruction";
    case EXCEPTION_BREAKPOINT:
      return "breakpoint";
    case EXCEPTION_FLT_DENORMAL_OPERAND:
      return "flt-denormal-operand";
    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
      return "flt-divide-by-zero";
    case EXCEPTION_FLT_INEXACT_RESULT:
      return "flt-inexact-result";
    case EXCEPTION_FLT_INVALID_OPERATION:
      return "flt-invalid-operation";
    case EXCEPTION_FLT_OVERFLOW:
      return "flt-overflow";
    case EXCEPTION_FLT_STACK_CHECK:
      return "flt-stack-check";
    case EXCEPTION_FLT_UNDERFLOW:
      return "flt-underflow";
    case EXCEPTION_INT_DIVIDE_BY_ZERO:
      return "int-divide-by-zero";
    case EXCEPTION_INT_OVERFLOW:
      return "int-overflow";
  }
  return "unknown exception";
}
997 
ReadBinaryName(char * buf,uptr buf_len)998 uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
999   // FIXME: Actually implement this function.
1000   CHECK_GT(buf_len, 0);
1001   buf[0] = 0;
1002   return 0;
1003 }
1004 
// On Windows the "long" process name is simply the binary name; delegate.
uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}
1008 
// Do nothing.
void CheckVMASize() {}
1012 
// No need to re-exec on Windows.
void MaybeReexec() {}
1016 
// Do nothing.
void CheckASLR() {}
1020 
// FIXME: Actually implement this function (e.g. via GetCommandLine parsing).
char **GetArgv() { return nullptr; }
1025 
StartSubprocess(const char * program,const char * const argv[],fd_t stdin_fd,fd_t stdout_fd,fd_t stderr_fd)1026 pid_t StartSubprocess(const char *program, const char *const argv[],
1027                       fd_t stdin_fd, fd_t stdout_fd, fd_t stderr_fd) {
1028   // FIXME: implement on this platform
1029   // Should be implemented based on
1030   // SymbolizerProcess::StarAtSymbolizerSubprocess
1031   // from lib/sanitizer_common/sanitizer_symbolizer_win.cc.
1032   return -1;
1033 }
1034 
IsProcessRunning(pid_t pid)1035 bool IsProcessRunning(pid_t pid) {
1036   // FIXME: implement on this platform.
1037   return false;
1038 }
1039 
WaitForProcess(pid_t pid)1040 int WaitForProcess(pid_t pid) { return -1; }
1041 
1042 // FIXME implement on this platform.
GetMemoryProfile(fill_profile_f cb,uptr * stats,uptr stats_size)1043 void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }
1044 
// Do nothing.
void CheckNoDeepBind(const char *filename, int flag) {
  (void)filename;
  (void)flag;
}
1048 
1049 // FIXME: implement on this platform.
GetRandom(void * buffer,uptr length,bool blocking)1050 bool GetRandom(void *buffer, uptr length, bool blocking) {
1051   UNIMPLEMENTED();
1052 }
1053 
GetNumberOfCPUs()1054 u32 GetNumberOfCPUs() {
1055   SYSTEM_INFO sysinfo = {};
1056   GetNativeSystemInfo(&sysinfo);
1057   return sysinfo.dwNumberOfProcessors;
1058 }
1059 
1060 }  // namespace __sanitizer
1061 
#endif  // SANITIZER_WINDOWS
1063