//===-- sanitizer_win.cc --------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements windows-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS

#define WIN32_LEAN_AND_MEAN
#define NOGDI
#include <windows.h>
#include <io.h>
#include <psapi.h>
#include <stdlib.h>

#include "sanitizer_common.h"
#include "sanitizer_dbghelp.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
#include "sanitizer_win_defs.h"

// A macro to tell the compiler that this part of the code cannot be reached,
// if the compiler supports this feature. Since we're using this in
// code that is called when terminating the process, the expansion of the
// macro should not terminate the process to avoid infinite recursion.
#if defined(__clang__)
# define BUILTIN_UNREACHABLE() __builtin_unreachable()
#elif defined(__GNUC__) && \
    (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
# define BUILTIN_UNREACHABLE() __builtin_unreachable()
#elif defined(_MSC_VER)
# define BUILTIN_UNREACHABLE() __assume(0)
#else
# define BUILTIN_UNREACHABLE()
#endif
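// For example, internal__exit() below calls TerminateProcess() and then
// BUILTIN_UNREACHABLE(), so the compiler knows control never falls through.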

namespace __sanitizer {

#include "sanitizer_syscall_generic.inc"

// --------------------- sanitizer_common.h
uptr GetPageSize() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwPageSize;
}

uptr GetMmapGranularity() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwAllocationGranularity;
}

uptr GetMaxVirtualAddress() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return (uptr)si.lpMaximumApplicationAddress;
}

bool FileExists(const char *filename) {
  return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES;
}

uptr internal_getpid() {
  return GetProcessId(GetCurrentProcess());
}

// In contrast to POSIX, on Windows GetCurrentThreadId()
// returns a system-unique identifier.
uptr GetTid() {
  return GetCurrentThreadId();
}

uptr GetThreadSelf() {
  return GetTid();
}

#if !SANITIZER_GO
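// Determine the current thread's stack bounds by calling VirtualQuery on a
// local variable: BaseAddress + RegionSize gives the top of the queried
// region and AllocationBase gives the bottom of the stack reservation.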
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  CHECK(stack_top);
  CHECK(stack_bottom);
  MEMORY_BASIC_INFORMATION mbi;
  CHECK_NE(VirtualQuery(&mbi /* on stack */, &mbi, sizeof(mbi)), 0);
  // FIXME: is it possible for the stack to not be a single allocation?
  // Are these values what ASan expects to get (reserved, not committed;
  // including stack guard page)?
  *stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize;
  *stack_bottom = (uptr)mbi.AllocationBase;
}
#endif  // #if !SANITIZER_GO

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (rv == 0)
    ReportMmapFailureAndDie(size, mem_type, "allocate",
                            GetLastError(), raw_report);
  return rv;
}

void UnmapOrDie(void *addr, uptr size) {
  if (!size || !addr)
    return;

  MEMORY_BASIC_INFORMATION mbi;
  CHECK(VirtualQuery(addr, &mbi, sizeof(mbi)));

  // MEM_RELEASE can only be used to unmap whole regions previously mapped with
  // VirtualAlloc. So we first try MEM_RELEASE since it is better, and if that
  // fails try MEM_DECOMMIT.
  if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
    if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
      Report("ERROR: %s failed to "
             "deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n",
             SanitizerToolName, size, size, addr, GetLastError());
      CHECK("unable to unmap" && 0);
    }
  }
}

// We want to map a chunk of address space aligned to 'alignment'.
void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  // Windows will align our allocations to at least 64K.
  alignment = Max(alignment, GetMmapGranularity());

  uptr mapped_addr =
      (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (!mapped_addr)
    ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());

  // If we got it right on the first try, return. Otherwise, unmap it and go to
  // the slow path.
  if (IsAligned(mapped_addr, alignment))
    return (void*)mapped_addr;
  if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
    ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());

  // If we didn't get an aligned address, overallocate, find an aligned address,
  // unmap, and try to allocate at that aligned address.
  int retries = 0;
  const int kMaxRetries = 10;
  for (; retries < kMaxRetries &&
         (mapped_addr == 0 || !IsAligned(mapped_addr, alignment));
       retries++) {
    // Overallocate size + alignment bytes.
    mapped_addr =
        (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
    if (!mapped_addr)
      ReportMmapFailureAndDie(size, mem_type, "allocate aligned",
                              GetLastError());

    // Find the aligned address.
    uptr aligned_addr = RoundUpTo(mapped_addr, alignment);

    // Free the overallocation.
    if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
      ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());

    // Attempt to allocate exactly the number of bytes we need at the aligned
    // address. This may fail for a number of reasons, in which case we continue
    // the loop.
    mapped_addr = (uptr)VirtualAlloc((void *)aligned_addr, size,
                                     MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  }

  // Fail if we can't make this work quickly.
  if (retries == kMaxRetries && mapped_addr == 0)
    ReportMmapFailureAndDie(size, mem_type, "allocate aligned", GetLastError());

  return (void *)mapped_addr;
}

void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
  // FIXME: is this really "NoReserve"? On Win32 this does not matter much,
  // but on Win64 it does.
  (void)name;  // unsupported
#if !SANITIZER_GO && SANITIZER_WINDOWS64
  // On ASan/Win64, using MEM_COMMIT here would result in error
  // 1455: ERROR_COMMITMENT_LIMIT.
  // ASan instead uses an exception handler to commit pages on demand.
  void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_READWRITE);
#else
  void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT,
                         PAGE_READWRITE);
#endif
  if (p == 0)
    Report("ERROR: %s failed to "
           "allocate %p (%zd) bytes at %p (error code: %d)\n",
           SanitizerToolName, size, size, fixed_addr, GetLastError());
  return p;
}

// Memory space mapped by 'MmapFixedOrDie' must have been reserved by
// 'MmapFixedNoAccess'.
void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
                         MEM_COMMIT, PAGE_READWRITE);
  if (p == 0) {
    char mem_type[30];
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                      fixed_addr);
    ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError());
  }
  return p;
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  // FIXME: make this really NoReserve?
  return MmapOrDie(size, mem_type);
}

void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  (void)name;  // unsupported
  void *res = VirtualAlloc((LPVOID)fixed_addr, size,
                           MEM_RESERVE, PAGE_NOACCESS);
  if (res == 0)
    Report("WARNING: %s failed to "
           "mprotect %p (%zd) bytes at %p (error code: %d)\n",
           SanitizerToolName, size, size, fixed_addr, GetLastError());
  return res;
}

void *MmapNoAccess(uptr size) {
  void *res = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_NOACCESS);
  if (res == 0)
    Report("WARNING: %s failed to "
           "mprotect %p (%zd) bytes (error code: %d)\n",
           SanitizerToolName, size, size, GetLastError());
  return res;
}

bool MprotectNoAccess(uptr addr, uptr size) {
  DWORD old_protection;
  return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);
}

void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
  // This is almost useless on 32-bits.
  // FIXME: add madvise-analog when we move to 64-bits.
}

void NoHugePagesInRegion(uptr addr, uptr size) {
  // FIXME: probably similar to ReleaseMemoryToOS.
}

void DontDumpShadowMemory(uptr addr, uptr length) {
  // This is almost useless on 32-bits.
  // FIXME: add madvise-analog when we move to 64-bits.
}

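// Scan the address space upward with VirtualQuery and return the first free
// region that can hold |size| bytes at the requested |alignment| (leaving
// |left_padding| bytes of slack below), or 0 if no such region is found.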
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding) {
  uptr address = 0;
  while (true) {
    MEMORY_BASIC_INFORMATION info;
    if (!::VirtualQuery((void*)address, &info, sizeof(info)))
      return 0;

    if (info.State == MEM_FREE) {
      uptr shadow_address = RoundUpTo((uptr)info.BaseAddress + left_padding,
                                      alignment);
      if (shadow_address + size < (uptr)info.BaseAddress + info.RegionSize)
        return shadow_address;
    }

    // Move to the next region.
    address = (uptr)info.BaseAddress + info.RegionSize;
  }
  return 0;
}

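// The range is considered available if it is covered by a single region that
// was reserved as PAGE_NOACCESS (e.g. by MmapFixedNoAccess above).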
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
  MEMORY_BASIC_INFORMATION mbi;
  CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi)));
  return mbi.Protect == PAGE_NOACCESS &&
         (uptr)mbi.BaseAddress + mbi.RegionSize >= range_end;
}

void *MapFileToMemory(const char *file_name, uptr *buff_size) {
  UNIMPLEMENTED();
}

void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
  UNIMPLEMENTED();
}

static const int kMaxEnvNameLength = 128;
static const DWORD kMaxEnvValueLength = 32767;

namespace {

struct EnvVariable {
  char name[kMaxEnvNameLength];
  char value[kMaxEnvValueLength];
};

}  // namespace

static const int kEnvVariables = 5;
static EnvVariable env_vars[kEnvVariables];
static int num_env_vars;

const char *GetEnv(const char *name) {
  // Note: this implementation caches the values of the environment variables
  // and limits their quantity.
  for (int i = 0; i < num_env_vars; i++) {
    if (0 == internal_strcmp(name, env_vars[i].name))
      return env_vars[i].value;
  }
  CHECK_LT(num_env_vars, kEnvVariables);
  DWORD rv = GetEnvironmentVariableA(name, env_vars[num_env_vars].value,
                                     kMaxEnvValueLength);
  if (rv > 0 && rv < kMaxEnvValueLength) {
    CHECK_LT(internal_strlen(name), kMaxEnvNameLength);
    internal_strncpy(env_vars[num_env_vars].name, name, kMaxEnvNameLength);
    num_env_vars++;
    return env_vars[num_env_vars - 1].value;
  }
  return 0;
}

const char *GetPwd() {
  UNIMPLEMENTED();
}

u32 GetUid() {
  UNIMPLEMENTED();
}

namespace {
struct ModuleInfo {
  const char *filepath;
  uptr base_address;
  uptr end_address;
};

#if !SANITIZER_GO
int CompareModulesBase(const void *pl, const void *pr) {
  const ModuleInfo *l = (ModuleInfo *)pl, *r = (ModuleInfo *)pr;
  if (l->base_address < r->base_address)
    return -1;
  return l->base_address > r->base_address;
}
#endif
}  // namespace

#if !SANITIZER_GO
void DumpProcessMap() {
  Report("Dumping process modules:\n");
  ListOfModules modules;
  modules.init();
  uptr num_modules = modules.size();

  InternalScopedBuffer<ModuleInfo> module_infos(num_modules);
  for (size_t i = 0; i < num_modules; ++i) {
    module_infos[i].filepath = modules[i].full_name();
    module_infos[i].base_address = modules[i].ranges().front()->beg;
    module_infos[i].end_address = modules[i].ranges().back()->end;
  }
  qsort(module_infos.data(), num_modules, sizeof(ModuleInfo),
        CompareModulesBase);

  for (size_t i = 0; i < num_modules; ++i) {
    const ModuleInfo &mi = module_infos[i];
    if (mi.end_address != 0) {
      Printf("\t%p-%p %s\n", mi.base_address, mi.end_address,
             mi.filepath[0] ? mi.filepath : "[no name]");
    } else if (mi.filepath[0]) {
      Printf("\t??\?-??? %s\n", mi.filepath);
    } else {
      Printf("\t???\n");
    }
  }
}
#endif

void PrintModuleMap() { }

void DisableCoreDumperIfNecessary() {
  // Do nothing.
}

void ReExec() {
  UNIMPLEMENTED();
}

void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
#if !SANITIZER_GO
  CovPrepareForSandboxing(args);
#endif
}

bool StackSizeIsUnlimited() {
  UNIMPLEMENTED();
}

void SetStackSizeLimitInBytes(uptr limit) {
  UNIMPLEMENTED();
}

bool AddressSpaceIsUnlimited() {
  UNIMPLEMENTED();
}

void SetAddressSpaceUnlimited() {
  UNIMPLEMENTED();
}

bool IsPathSeparator(const char c) {
  return c == '\\' || c == '/';
}

bool IsAbsolutePath(const char *path) {
  UNIMPLEMENTED();
}

void SleepForSeconds(int seconds) {
  Sleep(seconds * 1000);
}

void SleepForMillis(int millis) {
  Sleep(millis);
}

u64 NanoTime() {
  return 0;
}

void Abort() {
  internal__exit(3);
}

#if !SANITIZER_GO
// Read the file to extract the ImageBase field from the PE header. If ASLR is
// disabled and this virtual address is available, the loader will typically
// load the image at this address. Therefore, we call it the preferred base. Any
// addresses in the DWARF typically assume that the object has been loaded at
// this address.
static uptr GetPreferredBase(const char *modname) {
  fd_t fd = OpenFile(modname, RdOnly, nullptr);
  if (fd == kInvalidFd)
    return 0;
  FileCloser closer(fd);

  // Read just the DOS header.
  IMAGE_DOS_HEADER dos_header;
  uptr bytes_read;
  if (!ReadFromFile(fd, &dos_header, sizeof(dos_header), &bytes_read) ||
      bytes_read != sizeof(dos_header))
    return 0;

  // The file should start with the right signature.
  if (dos_header.e_magic != IMAGE_DOS_SIGNATURE)
    return 0;

  // The layout at e_lfanew is:
  // "PE\0\0"
  // IMAGE_FILE_HEADER
  // IMAGE_OPTIONAL_HEADER
  // Seek to e_lfanew and read all that data.
  char buf[4 + sizeof(IMAGE_FILE_HEADER) + sizeof(IMAGE_OPTIONAL_HEADER)];
  if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) ==
      INVALID_SET_FILE_POINTER)
    return 0;
  if (!ReadFromFile(fd, &buf[0], sizeof(buf), &bytes_read) ||
      bytes_read != sizeof(buf))
    return 0;

  // Check for "PE\0\0" before the PE header.
  char *pe_sig = &buf[0];
  if (internal_memcmp(pe_sig, "PE\0\0", 4) != 0)
    return 0;

  // Skip over IMAGE_FILE_HEADER. We could do more validation here if we wanted.
  IMAGE_OPTIONAL_HEADER *pe_header =
      (IMAGE_OPTIONAL_HEADER *)(pe_sig + 4 + sizeof(IMAGE_FILE_HEADER));

  // Check for more magic in the PE header.
  if (pe_header->Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC)
    return 0;

  // Finally, return the ImageBase.
  return (uptr)pe_header->ImageBase;
}

void ListOfModules::init() {
  clear();
  HANDLE cur_process = GetCurrentProcess();

  // Query the list of modules. Start by assuming there are no more than 256
  // modules and retry if that's not sufficient.
  HMODULE *hmodules = 0;
  uptr modules_buffer_size = sizeof(HMODULE) * 256;
  DWORD bytes_required;
  while (!hmodules) {
    hmodules = (HMODULE *)MmapOrDie(modules_buffer_size, __FUNCTION__);
    CHECK(EnumProcessModules(cur_process, hmodules, modules_buffer_size,
                             &bytes_required));
    if (bytes_required > modules_buffer_size) {
      // Either there turned out to be more than 256 hmodules, or new hmodules
      // could have loaded since the last try. Retry.
      UnmapOrDie(hmodules, modules_buffer_size);
      hmodules = 0;
      modules_buffer_size = bytes_required;
    }
  }

  // |num_modules| is the number of modules actually present,
  size_t num_modules = bytes_required / sizeof(HMODULE);
  for (size_t i = 0; i < num_modules; ++i) {
    HMODULE handle = hmodules[i];
    MODULEINFO mi;
    if (!GetModuleInformation(cur_process, handle, &mi, sizeof(mi)))
      continue;

    // Get the UTF-16 path and convert to UTF-8.
    wchar_t modname_utf16[kMaxPathLength];
    int modname_utf16_len =
        GetModuleFileNameW(handle, modname_utf16, kMaxPathLength);
    if (modname_utf16_len == 0)
      modname_utf16[0] = '\0';
    char module_name[kMaxPathLength];
    int module_name_len =
        ::WideCharToMultiByte(CP_UTF8, 0, modname_utf16, modname_utf16_len + 1,
                              &module_name[0], kMaxPathLength, NULL, NULL);
    module_name[module_name_len] = '\0';

    uptr base_address = (uptr)mi.lpBaseOfDll;
    uptr end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage;

    // Adjust the base address of the module so that we get a VA instead of an
    // RVA when computing the module offset. This helps llvm-symbolizer find the
    // right DWARF CU. In the common case that the image is loaded at its
    // preferred address, we will now print normal virtual addresses.
    uptr preferred_base = GetPreferredBase(&module_name[0]);
    uptr adjusted_base = base_address - preferred_base;

    LoadedModule cur_module;
    cur_module.set(module_name, adjusted_base);
    // We add the whole module as one single address range.
    cur_module.addAddressRange(base_address, end_address, /*executable*/ true);
    modules_.push_back(cur_module);
  }
  UnmapOrDie(hmodules, modules_buffer_size);
}

// We can't use atexit() directly at __asan_init time as the CRT is not fully
// initialized at this point. Place the functions into a vector and use
// atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers).
InternalMmapVectorNoCtor<void (*)(void)> atexit_functions;

int Atexit(void (*function)(void)) {
  atexit_functions.push_back(function);
  return 0;
}

static int RunAtexit() {
  int ret = 0;
  for (uptr i = 0; i < atexit_functions.size(); ++i) {
    ret |= atexit(atexit_functions[i]);
  }
  return ret;
}

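// Register RunAtexit as a CRT initializer. The .CRT$XI* sections run in
// lexicographic order during startup, so .CRT$XID executes after the CRT's own
// .CRT$XIC initializers, by which point atexit() is safe to call.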
#pragma section(".CRT$XID", long, read)  // NOLINT
__declspec(allocate(".CRT$XID")) int (*__run_atexit)() = RunAtexit;
#endif

// ------------------ sanitizer_libc.h
fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *last_error) {
  // FIXME: Use the wide variants to handle Unicode filenames.
  fd_t res;
  if (mode == RdOnly) {
    res = CreateFileA(filename, GENERIC_READ,
                      FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
                      nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
  } else if (mode == WrOnly) {
    res = CreateFileA(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
                      FILE_ATTRIBUTE_NORMAL, nullptr);
  } else {
    UNIMPLEMENTED();
  }
  CHECK(res != kStdoutFd || kStdoutFd == kInvalidFd);
  CHECK(res != kStderrFd || kStderrFd == kInvalidFd);
  if (res == kInvalidFd && last_error)
    *last_error = GetLastError();
  return res;
}

void CloseFile(fd_t fd) {
  CloseHandle(fd);
}

bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
                  error_t *error_p) {
  CHECK(fd != kInvalidFd);

  // bytes_read can't be passed directly to ReadFile:
  // uptr is unsigned long long on 64-bit Windows.
  unsigned long num_read_long;

  bool success = ::ReadFile(fd, buff, buff_size, &num_read_long, nullptr);
  if (!success && error_p)
    *error_p = GetLastError();
  if (bytes_read)
    *bytes_read = num_read_long;
  return success;
}

bool SupportsColoredOutput(fd_t fd) {
  // FIXME: support colored output.
  return false;
}

bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
                 error_t *error_p) {
  CHECK(fd != kInvalidFd);

  // Handle null optional parameters.
  error_t dummy_error;
  error_p = error_p ? error_p : &dummy_error;
  uptr dummy_bytes_written;
  bytes_written = bytes_written ? bytes_written : &dummy_bytes_written;

  // Initialize output parameters in case we fail.
  *error_p = 0;
  *bytes_written = 0;

  // Map the conventional Unix fds 1 and 2 to Windows handles. They might be
  // closed, in which case this will fail.
  if (fd == kStdoutFd || fd == kStderrFd) {
    fd = GetStdHandle(fd == kStdoutFd ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE);
    if (fd == 0) {
      *error_p = ERROR_INVALID_HANDLE;
      return false;
    }
  }

  DWORD bytes_written_32;
  if (!WriteFile(fd, buff, buff_size, &bytes_written_32, 0)) {
    *error_p = GetLastError();
    return false;
  } else {
    *bytes_written = bytes_written_32;
    return true;
  }
}

bool RenameFile(const char *oldpath, const char *newpath, error_t *error_p) {
  UNIMPLEMENTED();
}

uptr internal_sched_yield() {
  Sleep(0);
  return 0;
}

void internal__exit(int exitcode) {
  // ExitProcess runs some finalizers, so use TerminateProcess to avoid that.
  // The debugger doesn't stop on TerminateProcess like it does on ExitProcess,
  // so add our own breakpoint here.
  if (::IsDebuggerPresent())
    __debugbreak();
  TerminateProcess(GetCurrentProcess(), exitcode);
  BUILTIN_UNREACHABLE();
}

uptr internal_ftruncate(fd_t fd, uptr size) {
  UNIMPLEMENTED();
}

uptr GetRSS() {
  return 0;
}

void *internal_start_thread(void (*func)(void *arg), void *arg) { return 0; }
void internal_join_thread(void *th) { }

// ---------------------- BlockingMutex ---------------- {{{1
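// owner_ doubles as an initialization marker: it is LOCK_UNINITIALIZED before
// the constructor has run, LOCK_READY while the mutex is unlocked, and the
// owning thread id while it is held.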
const uptr LOCK_UNINITIALIZED = 0;
const uptr LOCK_READY = (uptr)-1;

BlockingMutex::BlockingMutex(LinkerInitialized li) {
  // FIXME: see comments in BlockingMutex::Lock() for the details.
  CHECK(li == LINKER_INITIALIZED || owner_ == LOCK_UNINITIALIZED);

  CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_));
  InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
  owner_ = LOCK_READY;
}

BlockingMutex::BlockingMutex() {
  CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_));
  InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
  owner_ = LOCK_READY;
}

void BlockingMutex::Lock() {
  if (owner_ == LOCK_UNINITIALIZED) {
    // FIXME: hm, global BlockingMutex objects are not initialized?!?
    // This might be a side effect of the clang+cl+link Frankenbuild...
    new(this) BlockingMutex((LinkerInitialized)(LINKER_INITIALIZED + 1));

    // FIXME: If it turns out the linker doesn't invoke our
    // constructors, we should probably manually Lock/Unlock all the global
    // locks while we're starting in one thread to avoid double-init races.
  }
  EnterCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
  CHECK_EQ(owner_, LOCK_READY);
  owner_ = GetThreadSelf();
}

void BlockingMutex::Unlock() {
  CHECK_EQ(owner_, GetThreadSelf());
  owner_ = LOCK_READY;
  LeaveCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
}

void BlockingMutex::CheckLocked() {
  CHECK_EQ(owner_, GetThreadSelf());
}

uptr GetTlsSize() {
  return 0;
}

void InitTlsSize() {
}

void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size) {
#if SANITIZER_GO
  *stk_addr = 0;
  *stk_size = 0;
  *tls_addr = 0;
  *tls_size = 0;
#else
  uptr stack_top, stack_bottom;
  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
  *stk_addr = stack_bottom;
  *stk_size = stack_top - stack_bottom;
  *tls_addr = 0;
  *tls_size = 0;
#endif
}

#if !SANITIZER_GO
void BufferedStackTrace::SlowUnwindStack(uptr pc, u32 max_depth) {
  CHECK_GE(max_depth, 2);
  // FIXME: CaptureStackBackTrace might be too slow for us.
  // FIXME: Compare with StackWalk64.
  // FIXME: Look at LLVMUnhandledExceptionFilter in Signals.inc
  size = CaptureStackBackTrace(1, Min(max_depth, kStackTraceMax),
                               (void**)trace, 0);
  if (size == 0)
    return;

  // Skip the RTL frames by searching for the PC in the stacktrace.
  uptr pc_location = LocatePcInTrace(pc);
  PopStackFrames(pc_location);
}

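// Unwind from an exception context using dbghelp's StackWalk64, seeded with
// the PC/FP/SP from the captured CONTEXT. StackWalk64 consults module unwind
// data through the SymFunctionTableAccess64/SymGetModuleBase64 callbacks, so
// it can also walk frames that were compiled without frame pointers.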
void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
                                                    u32 max_depth) {
  CONTEXT ctx = *(CONTEXT *)context;
  STACKFRAME64 stack_frame;
  memset(&stack_frame, 0, sizeof(stack_frame));

  InitializeDbgHelpIfNeeded();

  size = 0;
#if defined(_WIN64)
  int machine_type = IMAGE_FILE_MACHINE_AMD64;
  stack_frame.AddrPC.Offset = ctx.Rip;
  stack_frame.AddrFrame.Offset = ctx.Rbp;
  stack_frame.AddrStack.Offset = ctx.Rsp;
#else
  int machine_type = IMAGE_FILE_MACHINE_I386;
  stack_frame.AddrPC.Offset = ctx.Eip;
  stack_frame.AddrFrame.Offset = ctx.Ebp;
  stack_frame.AddrStack.Offset = ctx.Esp;
#endif
  stack_frame.AddrPC.Mode = AddrModeFlat;
  stack_frame.AddrFrame.Mode = AddrModeFlat;
  stack_frame.AddrStack.Mode = AddrModeFlat;
  while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(),
                     &stack_frame, &ctx, NULL, SymFunctionTableAccess64,
                     SymGetModuleBase64, NULL) &&
         size < Min(max_depth, kStackTraceMax)) {
    trace_buffer[size++] = (uptr)stack_frame.AddrPC.Offset;
  }
}
#endif  // #if !SANITIZER_GO

void ReportFile::Write(const char *buffer, uptr length) {
  SpinMutexLock l(mu);
  ReopenIfNecessary();
  if (!WriteToFile(fd, buffer, length)) {
    // stderr may be closed, but we may be able to print to the debugger
    // instead. This is the case when launching a program from Visual Studio,
    // and the following routine should write to its console.
    OutputDebugStringA(buffer);
  }
}

void SetAlternateSignalStack() {
  // FIXME: Decide what to do on Windows.
}

void UnsetAlternateSignalStack() {
  // FIXME: Decide what to do on Windows.
}

void InstallDeadlySignalHandlers(SignalHandlerType handler) {
  (void)handler;
  // FIXME: Decide what to do on Windows.
}

bool IsHandledDeadlySignal(int signum) {
  // FIXME: Decide what to do on Windows.
  return false;
}

// Check based on flags if we should handle this exception.
bool IsHandledDeadlyException(DWORD exceptionCode) {
  switch (exceptionCode) {
    case EXCEPTION_ACCESS_VIOLATION:
    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
    case EXCEPTION_STACK_OVERFLOW:
    case EXCEPTION_DATATYPE_MISALIGNMENT:
    case EXCEPTION_IN_PAGE_ERROR:
      return common_flags()->handle_segv;
    case EXCEPTION_ILLEGAL_INSTRUCTION:
    case EXCEPTION_PRIV_INSTRUCTION:
    case EXCEPTION_BREAKPOINT:
      return common_flags()->handle_sigill;
    case EXCEPTION_FLT_DENORMAL_OPERAND:
    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
    case EXCEPTION_FLT_INEXACT_RESULT:
    case EXCEPTION_FLT_INVALID_OPERATION:
    case EXCEPTION_FLT_OVERFLOW:
    case EXCEPTION_FLT_STACK_CHECK:
    case EXCEPTION_FLT_UNDERFLOW:
    case EXCEPTION_INT_DIVIDE_BY_ZERO:
    case EXCEPTION_INT_OVERFLOW:
      return common_flags()->handle_sigfpe;
  }
  return false;
}

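// On Windows the "signal number" passed in is really an NT exception code;
// map the known deadly codes to the short names used in sanitizer reports.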
const char *DescribeSignalOrException(int signo) {
  unsigned code = signo;
  // Get the string description of the exception if this is a known deadly
  // exception.
  switch (code) {
    case EXCEPTION_ACCESS_VIOLATION: return "access-violation";
    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED: return "array-bounds-exceeded";
    case EXCEPTION_STACK_OVERFLOW: return "stack-overflow";
    case EXCEPTION_DATATYPE_MISALIGNMENT: return "datatype-misalignment";
    case EXCEPTION_IN_PAGE_ERROR: return "in-page-error";
    case EXCEPTION_ILLEGAL_INSTRUCTION: return "illegal-instruction";
    case EXCEPTION_PRIV_INSTRUCTION: return "priv-instruction";
    case EXCEPTION_BREAKPOINT: return "breakpoint";
    case EXCEPTION_FLT_DENORMAL_OPERAND: return "flt-denormal-operand";
    case EXCEPTION_FLT_DIVIDE_BY_ZERO: return "flt-divide-by-zero";
    case EXCEPTION_FLT_INEXACT_RESULT: return "flt-inexact-result";
    case EXCEPTION_FLT_INVALID_OPERATION: return "flt-invalid-operation";
    case EXCEPTION_FLT_OVERFLOW: return "flt-overflow";
    case EXCEPTION_FLT_STACK_CHECK: return "flt-stack-check";
    case EXCEPTION_FLT_UNDERFLOW: return "flt-underflow";
    case EXCEPTION_INT_DIVIDE_BY_ZERO: return "int-divide-by-zero";
    case EXCEPTION_INT_OVERFLOW: return "int-overflow";
  }
  return "unknown exception";
}

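// Walk the range page by page with VirtualQuery; the range is accessible only
// if every page is mapped and not PAGE_NOACCESS or execute-only.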
bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  SYSTEM_INFO si;
  GetNativeSystemInfo(&si);
  uptr page_size = si.dwPageSize;
  uptr page_mask = ~(page_size - 1);

  for (uptr page = beg & page_mask, end = (beg + size - 1) & page_mask;
       page <= end;) {
    MEMORY_BASIC_INFORMATION info;
    if (VirtualQuery((LPCVOID)page, &info, sizeof(info)) != sizeof(info))
      return false;

    if (info.Protect == 0 || info.Protect == PAGE_NOACCESS ||
        info.Protect == PAGE_EXECUTE)
      return false;

    if (info.RegionSize == 0)
      return false;

    page += info.RegionSize;
  }

  return true;
}

SignalContext SignalContext::Create(void *siginfo, void *context) {
  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
  CONTEXT *context_record = (CONTEXT *)context;

  uptr pc = (uptr)exception_record->ExceptionAddress;
#ifdef _WIN64
  uptr bp = (uptr)context_record->Rbp;
  uptr sp = (uptr)context_record->Rsp;
#else
  uptr bp = (uptr)context_record->Ebp;
  uptr sp = (uptr)context_record->Esp;
#endif
  uptr access_addr = exception_record->ExceptionInformation[1];

  // The contents of this array are documented at
  // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363082(v=vs.85).aspx
  // The first element indicates read as 0, write as 1, or execute as 8. The
  // second element is the faulting address.
  WriteFlag write_flag = SignalContext::UNKNOWN;
  switch (exception_record->ExceptionInformation[0]) {
  case 0: write_flag = SignalContext::READ; break;
  case 1: write_flag = SignalContext::WRITE; break;
  case 8: write_flag = SignalContext::UNKNOWN; break;
  }
  bool is_memory_access = write_flag != SignalContext::UNKNOWN;
  return SignalContext(context, access_addr, pc, sp, bp, is_memory_access,
                       write_flag);
}

void SignalContext::DumpAllRegisters(void *context) {
  // FIXME: Implement this.
}

uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
  // FIXME: Actually implement this function.
  CHECK_GT(buf_len, 0);
  buf[0] = 0;
  return 0;
}

uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

void CheckVMASize() {
  // Do nothing.
}

void MaybeReexec() {
  // No need to re-exec on Windows.
}

char **GetArgv() {
  // FIXME: Actually implement this function.
  return 0;
}

pid_t StartSubprocess(const char *program, const char *const argv[],
                      fd_t stdin_fd, fd_t stdout_fd, fd_t stderr_fd) {
  // FIXME: implement on this platform
  // Should be implemented based on
  // SymbolizerProcess::StartAtSymbolizerSubprocess
  // from lib/sanitizer_common/sanitizer_symbolizer_win.cc.
  return -1;
}

bool IsProcessRunning(pid_t pid) {
  // FIXME: implement on this platform.
  return false;
}

int WaitForProcess(pid_t pid) { return -1; }

// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }

void CheckNoDeepBind(const char *filename, int flag) {
  // Do nothing.
}

}  // namespace __sanitizer

#endif  // _WIN32