//===-- sanitizer_win.cc --------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements windows-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"
#if SANITIZER_WINDOWS

#define WIN32_LEAN_AND_MEAN
#define NOGDI
#include <windows.h>
#include <io.h>
#include <psapi.h>
#include <stdlib.h>

#include "sanitizer_common.h"
#include "sanitizer_dbghelp.h"
#include "sanitizer_file.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
#include "sanitizer_placement_new.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"
#include "sanitizer_symbolizer.h"
#include "sanitizer_win_defs.h"

// A macro to tell the compiler that this part of the code cannot be reached,
// if the compiler supports this feature. Since we're using this in
// code that is called when terminating the process, the expansion of the
// macro should not terminate the process to avoid infinite recursion.
#if defined(__clang__)
# define BUILTIN_UNREACHABLE() __builtin_unreachable()
#elif defined(__GNUC__) && \
    (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
# define BUILTIN_UNREACHABLE() __builtin_unreachable()
#elif defined(_MSC_VER)
# define BUILTIN_UNREACHABLE() __assume(0)
#else
# define BUILTIN_UNREACHABLE()
#endif
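// BUILTIN_UNREACHABLE() is meant to follow a call that never returns (see
// internal__exit() below): it lets the compiler drop the unreachable epilogue
// without us adding yet another process-terminating call there.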

namespace __sanitizer {

#include "sanitizer_syscall_generic.inc"

// --------------------- sanitizer_common.h
uptr GetPageSize() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwPageSize;
}

uptr GetMmapGranularity() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return si.dwAllocationGranularity;
}

uptr GetMaxVirtualAddress() {
  SYSTEM_INFO si;
  GetSystemInfo(&si);
  return (uptr)si.lpMaximumApplicationAddress;
}

bool FileExists(const char *filename) {
  return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES;
}

uptr internal_getpid() {
  return GetProcessId(GetCurrentProcess());
}

// In contrast to POSIX, on Windows GetCurrentThreadId()
// returns a system-unique identifier.
tid_t GetTid() {
  return GetCurrentThreadId();
}

uptr GetThreadSelf() {
  return GetTid();
}

#if !SANITIZER_GO
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom) {
  CHECK(stack_top);
  CHECK(stack_bottom);
  MEMORY_BASIC_INFORMATION mbi;
  CHECK_NE(VirtualQuery(&mbi /* on stack */, &mbi, sizeof(mbi)), 0);
  // FIXME: is it possible for the stack to not be a single allocation?
  // Are these values what ASan expects to get (reserved, not committed;
  // including stack guard page) ?
  *stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize;
  *stack_bottom = (uptr)mbi.AllocationBase;
}
#endif  // #if !SANITIZER_GO

void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
  void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (rv == 0)
    ReportMmapFailureAndDie(size, mem_type, "allocate",
                            GetLastError(), raw_report);
  return rv;
}

void UnmapOrDie(void *addr, uptr size) {
  if (!size || !addr)
    return;

  MEMORY_BASIC_INFORMATION mbi;
  CHECK(VirtualQuery(addr, &mbi, sizeof(mbi)));

  // MEM_RELEASE can only be used to unmap whole regions previously mapped with
  // VirtualAlloc. So we first try MEM_RELEASE since it is better, and if that
  // fails try MEM_DECOMMIT.
  if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
    if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
      Report("ERROR: %s failed to "
             "deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n",
             SanitizerToolName, size, size, addr, GetLastError());
      CHECK("unable to unmap" && 0);
    }
  }
}

static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type,
                                     const char *mmap_type) {
  error_t last_error = GetLastError();
  if (last_error == ERROR_NOT_ENOUGH_MEMORY)
    return nullptr;
  ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error);
}

void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
  void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (rv == 0)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
  return rv;
}

// We want to map a chunk of address space aligned to 'alignment'.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type) {
  CHECK(IsPowerOfTwo(size));
  CHECK(IsPowerOfTwo(alignment));

  // Windows will align our allocations to at least 64K.
  alignment = Max(alignment, GetMmapGranularity());

  uptr mapped_addr =
      (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  if (!mapped_addr)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

  // If we got it right on the first try, return. Otherwise, unmap it and go to
  // the slow path.
  if (IsAligned(mapped_addr, alignment))
    return (void*)mapped_addr;
  if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
    ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());

  // If we didn't get an aligned address, overallocate, find an aligned address,
  // unmap, and try to allocate at that aligned address.
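  // Note: between the VirtualFree of the overallocation and the targeted
  // VirtualAlloc below, another thread may map something at the chosen
  // address; the targeted allocation then fails and we simply retry.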
  int retries = 0;
  const int kMaxRetries = 10;
  for (; retries < kMaxRetries &&
         (mapped_addr == 0 || !IsAligned(mapped_addr, alignment));
       retries++) {
    // Overallocate size + alignment bytes.
    mapped_addr =
        (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
    if (!mapped_addr)
      return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

    // Find the aligned address.
    uptr aligned_addr = RoundUpTo(mapped_addr, alignment);

    // Free the overallocation.
    if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
      ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());

    // Attempt to allocate exactly the number of bytes we need at the aligned
    // address. This may fail for a number of reasons, in which case we continue
    // the loop.
    mapped_addr = (uptr)VirtualAlloc((void *)aligned_addr, size,
                                     MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
  }

  // Fail if we can't make this work quickly.
  if (retries == kMaxRetries && mapped_addr == 0)
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");

  return (void *)mapped_addr;
}

void *MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
  // FIXME: is this really "NoReserve"? On Win32 this does not matter much,
  // but on Win64 it does.
  (void)name;  // unsupported
#if !SANITIZER_GO && SANITIZER_WINDOWS64
  // On asan/Windows64, using MEM_COMMIT would result in error
  // 1455:ERROR_COMMITMENT_LIMIT.
  // Asan uses an exception handler to commit pages on demand.
  void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_READWRITE);
#else
  void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT,
                         PAGE_READWRITE);
#endif
  if (p == 0)
    Report("ERROR: %s failed to "
           "allocate %p (%zd) bytes at %p (error code: %d)\n",
           SanitizerToolName, size, size, fixed_addr, GetLastError());
  return p;
}

// Memory space mapped by 'MmapFixedOrDie' must have been reserved by
// 'MmapFixedNoAccess'.
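// For example (illustrative only; the real callers live in the tool-specific
// runtimes), shadow memory is typically reserved once with MmapFixedNoAccess
// and individual pages are later committed here:
//   MmapFixedNoAccess(shadow_beg, shadow_size);  // reserve, PAGE_NOACCESS
//   ...
//   MmapFixedOrDie(page_beg, page_size);         // commit on first use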
void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
                         MEM_COMMIT, PAGE_READWRITE);
  if (p == 0) {
    char mem_type[30];
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                      fixed_addr);
    ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError());
  }
  return p;
}

void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
  void *p = VirtualAlloc((LPVOID)fixed_addr, size,
                         MEM_COMMIT, PAGE_READWRITE);
  if (p == 0) {
    char mem_type[30];
    internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
                      fixed_addr);
    return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
  }
  return p;
}

void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  // FIXME: make this really NoReserve?
  return MmapOrDie(size, mem_type);
}

void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
  (void)name;  // unsupported
  void *res = VirtualAlloc((LPVOID)fixed_addr, size,
                           MEM_RESERVE, PAGE_NOACCESS);
  if (res == 0)
    Report("WARNING: %s failed to "
           "mprotect %p (%zd) bytes at %p (error code: %d)\n",
           SanitizerToolName, size, size, fixed_addr, GetLastError());
  return res;
}

void *MmapNoAccess(uptr size) {
  void *res = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_NOACCESS);
  if (res == 0)
    Report("WARNING: %s failed to "
           "mprotect %p (%zd) bytes (error code: %d)\n",
           SanitizerToolName, size, size, GetLastError());
  return res;
}

bool MprotectNoAccess(uptr addr, uptr size) {
  DWORD old_protection;
  return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);
}

void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
  // This is almost useless on 32-bits.
  // FIXME: add madvise-analog when we move to 64-bits.
}

void NoHugePagesInRegion(uptr addr, uptr size) {
  // FIXME: probably similar to ReleaseMemoryToOS.
}

void DontDumpShadowMemory(uptr addr, uptr length) {
  // This is almost useless on 32-bits.
  // FIXME: add madvise-analog when we move to 64-bits.
}

uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found) {
  uptr address = 0;
  while (true) {
    MEMORY_BASIC_INFORMATION info;
    if (!::VirtualQuery((void*)address, &info, sizeof(info)))
      return 0;

    if (info.State == MEM_FREE) {
      uptr shadow_address = RoundUpTo((uptr)info.BaseAddress + left_padding,
                                      alignment);
      if (shadow_address + size < (uptr)info.BaseAddress + info.RegionSize)
        return shadow_address;
    }

    // Move to the next region.
    address = (uptr)info.BaseAddress + info.RegionSize;
  }
  return 0;
}

bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
  MEMORY_BASIC_INFORMATION mbi;
  CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi)));
  return mbi.Protect == PAGE_NOACCESS &&
         (uptr)mbi.BaseAddress + mbi.RegionSize >= range_end;
}

void *MapFileToMemory(const char *file_name, uptr *buff_size) {
  UNIMPLEMENTED();
}

void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
  UNIMPLEMENTED();
}

static const int kMaxEnvNameLength = 128;
static const DWORD kMaxEnvValueLength = 32767;

namespace {

struct EnvVariable {
  char name[kMaxEnvNameLength];
  char value[kMaxEnvValueLength];
};

}  // namespace

static const int kEnvVariables = 5;
static EnvVariable env_vars[kEnvVariables];
static int num_env_vars;

const char *GetEnv(const char *name) {
  // Note: this implementation caches the values of the environment variables
  // and limits their quantity.
  for (int i = 0; i < num_env_vars; i++) {
    if (0 == internal_strcmp(name, env_vars[i].name))
      return env_vars[i].value;
  }
  CHECK_LT(num_env_vars, kEnvVariables);
  DWORD rv = GetEnvironmentVariableA(name, env_vars[num_env_vars].value,
                                     kMaxEnvValueLength);
  if (rv > 0 && rv < kMaxEnvValueLength) {
    CHECK_LT(internal_strlen(name), kMaxEnvNameLength);
    internal_strncpy(env_vars[num_env_vars].name, name, kMaxEnvNameLength);
    num_env_vars++;
    return env_vars[num_env_vars - 1].value;
  }
  return 0;
}

const char *GetPwd() {
  UNIMPLEMENTED();
}

u32 GetUid() {
  UNIMPLEMENTED();
}

namespace {
struct ModuleInfo {
  const char *filepath;
  uptr base_address;
  uptr end_address;
};

#if !SANITIZER_GO
int CompareModulesBase(const void *pl, const void *pr) {
  const ModuleInfo *l = (ModuleInfo *)pl, *r = (ModuleInfo *)pr;
  if (l->base_address < r->base_address)
    return -1;
  return l->base_address > r->base_address;
}
#endif
}  // namespace

#if !SANITIZER_GO
void DumpProcessMap() {
  Report("Dumping process modules:\n");
  ListOfModules modules;
  modules.init();
  uptr num_modules = modules.size();

  InternalScopedBuffer<ModuleInfo> module_infos(num_modules);
  for (size_t i = 0; i < num_modules; ++i) {
    module_infos[i].filepath = modules[i].full_name();
    module_infos[i].base_address = modules[i].ranges().front()->beg;
    module_infos[i].end_address = modules[i].ranges().back()->end;
  }
  qsort(module_infos.data(), num_modules, sizeof(ModuleInfo),
        CompareModulesBase);

  for (size_t i = 0; i < num_modules; ++i) {
    const ModuleInfo &mi = module_infos[i];
    if (mi.end_address != 0) {
      Printf("\t%p-%p %s\n", mi.base_address, mi.end_address,
             mi.filepath[0] ? mi.filepath : "[no name]");
    } else if (mi.filepath[0]) {
      Printf("\t??\?-??? %s\n", mi.filepath);
    } else {
      Printf("\t???\n");
    }
  }
}
#endif

void PrintModuleMap() { }

void DisableCoreDumperIfNecessary() {
  // Do nothing.
}

void ReExec() {
  UNIMPLEMENTED();
}

void PrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
}

bool StackSizeIsUnlimited() {
  UNIMPLEMENTED();
}

void SetStackSizeLimitInBytes(uptr limit) {
  UNIMPLEMENTED();
}

bool AddressSpaceIsUnlimited() {
  UNIMPLEMENTED();
}

void SetAddressSpaceUnlimited() {
  UNIMPLEMENTED();
}

bool IsPathSeparator(const char c) {
  return c == '\\' || c == '/';
}

bool IsAbsolutePath(const char *path) {
  UNIMPLEMENTED();
}

void SleepForSeconds(int seconds) {
  Sleep(seconds * 1000);
}

void SleepForMillis(int millis) {
  Sleep(millis);
}

u64 NanoTime() {
  return 0;
}

void Abort() {
  internal__exit(3);
}

#if !SANITIZER_GO
// Read the file to extract the ImageBase field from the PE header. If ASLR is
// disabled and this virtual address is available, the loader will typically
// load the image at this address. Therefore, we call it the preferred base. Any
// addresses in the DWARF typically assume that the object has been loaded at
// this address.
static uptr GetPreferredBase(const char *modname) {
  fd_t fd = OpenFile(modname, RdOnly, nullptr);
  if (fd == kInvalidFd)
    return 0;
  FileCloser closer(fd);

  // Read just the DOS header.
  IMAGE_DOS_HEADER dos_header;
  uptr bytes_read;
  if (!ReadFromFile(fd, &dos_header, sizeof(dos_header), &bytes_read) ||
      bytes_read != sizeof(dos_header))
    return 0;

  // The file should start with the right signature.
  if (dos_header.e_magic != IMAGE_DOS_SIGNATURE)
    return 0;

  // The layout at e_lfanew is:
  // "PE\0\0"
  // IMAGE_FILE_HEADER
  // IMAGE_OPTIONAL_HEADER
  // Seek to e_lfanew and read all that data.
  char buf[4 + sizeof(IMAGE_FILE_HEADER) + sizeof(IMAGE_OPTIONAL_HEADER)];
  if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) ==
      INVALID_SET_FILE_POINTER)
    return 0;
  if (!ReadFromFile(fd, &buf[0], sizeof(buf), &bytes_read) ||
      bytes_read != sizeof(buf))
    return 0;

  // Check for "PE\0\0" before the PE header.
  char *pe_sig = &buf[0];
  if (internal_memcmp(pe_sig, "PE\0\0", 4) != 0)
    return 0;

  // Skip over IMAGE_FILE_HEADER. We could do more validation here if we wanted.
  IMAGE_OPTIONAL_HEADER *pe_header =
      (IMAGE_OPTIONAL_HEADER *)(pe_sig + 4 + sizeof(IMAGE_FILE_HEADER));

  // Check for more magic in the PE header.
  if (pe_header->Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC)
    return 0;

  // Finally, return the ImageBase.
  return (uptr)pe_header->ImageBase;
}

void ListOfModules::init() {
  clear();
  HANDLE cur_process = GetCurrentProcess();

  // Query the list of modules. Start by assuming there are no more than 256
  // modules and retry if that's not sufficient.
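  // EnumProcessModules reports the size it needs in |bytes_required| even
  // when the supplied buffer turns out to be too small.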
  HMODULE *hmodules = 0;
  uptr modules_buffer_size = sizeof(HMODULE) * 256;
  DWORD bytes_required;
  while (!hmodules) {
    hmodules = (HMODULE *)MmapOrDie(modules_buffer_size, __FUNCTION__);
    CHECK(EnumProcessModules(cur_process, hmodules, modules_buffer_size,
                             &bytes_required));
    if (bytes_required > modules_buffer_size) {
      // Either there turned out to be more than 256 hmodules, or new hmodules
      // could have loaded since the last try. Retry.
      UnmapOrDie(hmodules, modules_buffer_size);
      hmodules = 0;
      modules_buffer_size = bytes_required;
    }
  }

  // |num_modules| is the number of modules actually present.
  size_t num_modules = bytes_required / sizeof(HMODULE);
  for (size_t i = 0; i < num_modules; ++i) {
    HMODULE handle = hmodules[i];
    MODULEINFO mi;
    if (!GetModuleInformation(cur_process, handle, &mi, sizeof(mi)))
      continue;

    // Get the UTF-16 path and convert to UTF-8.
    wchar_t modname_utf16[kMaxPathLength];
    int modname_utf16_len =
        GetModuleFileNameW(handle, modname_utf16, kMaxPathLength);
    if (modname_utf16_len == 0)
      modname_utf16[0] = '\0';
    char module_name[kMaxPathLength];
    int module_name_len =
        ::WideCharToMultiByte(CP_UTF8, 0, modname_utf16, modname_utf16_len + 1,
                              &module_name[0], kMaxPathLength, NULL, NULL);
    module_name[module_name_len] = '\0';

    uptr base_address = (uptr)mi.lpBaseOfDll;
    uptr end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage;

    // Adjust the base address of the module so that we get a VA instead of an
    // RVA when computing the module offset. This helps llvm-symbolizer find the
    // right DWARF CU. In the common case that the image is loaded at its
    // preferred address, we will now print normal virtual addresses.
    uptr preferred_base = GetPreferredBase(&module_name[0]);
    uptr adjusted_base = base_address - preferred_base;

    LoadedModule cur_module;
    cur_module.set(module_name, adjusted_base);
    // We add the whole module as one single address range.
    cur_module.addAddressRange(base_address, end_address, /*executable*/ true,
                               /*writable*/ true);
    modules_.push_back(cur_module);
  }
  UnmapOrDie(hmodules, modules_buffer_size);
}

// We can't use atexit() directly at __asan_init time as the CRT is not fully
// initialized at this point. Place the functions into a vector and use
// atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers).
InternalMmapVectorNoCtor<void (*)(void)> atexit_functions;

int Atexit(void (*function)(void)) {
  atexit_functions.push_back(function);
  return 0;
}

static int RunAtexit() {
  int ret = 0;
  for (uptr i = 0; i < atexit_functions.size(); ++i) {
    ret |= atexit(atexit_functions[i]);
  }
  return ret;
}

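// CRT initializer sections named .CRT$XI* run in alphabetical order during C
// initialization, so an entry in .CRT$XID runs after the CRT's own .CRT$XIC
// initializers, by which point atexit() is safe to call.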
#pragma section(".CRT$XID", long, read)  // NOLINT
__declspec(allocate(".CRT$XID")) int (*__run_atexit)() = RunAtexit;
#endif

// ------------------ sanitizer_libc.h
fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *last_error) {
  // FIXME: Use the wide variants to handle Unicode filenames.
  fd_t res;
  if (mode == RdOnly) {
    res = CreateFileA(filename, GENERIC_READ,
                      FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
                      nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
  } else if (mode == WrOnly) {
    res = CreateFileA(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
                      FILE_ATTRIBUTE_NORMAL, nullptr);
  } else {
    UNIMPLEMENTED();
  }
  CHECK(res != kStdoutFd || kStdoutFd == kInvalidFd);
  CHECK(res != kStderrFd || kStderrFd == kInvalidFd);
  if (res == kInvalidFd && last_error)
    *last_error = GetLastError();
  return res;
}

void CloseFile(fd_t fd) {
  CloseHandle(fd);
}

bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
                  error_t *error_p) {
  CHECK(fd != kInvalidFd);

  // bytes_read can't be passed directly to ReadFile:
  // uptr is unsigned long long on 64-bit Windows.
  unsigned long num_read_long;

  bool success = ::ReadFile(fd, buff, buff_size, &num_read_long, nullptr);
  if (!success && error_p)
    *error_p = GetLastError();
  if (bytes_read)
    *bytes_read = num_read_long;
  return success;
}

bool SupportsColoredOutput(fd_t fd) {
  // FIXME: support colored output.
  return false;
}

bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
                 error_t *error_p) {
  CHECK(fd != kInvalidFd);

  // Handle null optional parameters.
  error_t dummy_error;
  error_p = error_p ? error_p : &dummy_error;
  uptr dummy_bytes_written;
  bytes_written = bytes_written ? bytes_written : &dummy_bytes_written;

  // Initialize output parameters in case we fail.
  *error_p = 0;
  *bytes_written = 0;

  // Map the conventional Unix fds 1 and 2 to Windows handles. They might be
  // closed, in which case this will fail.
  if (fd == kStdoutFd || fd == kStderrFd) {
    fd = GetStdHandle(fd == kStdoutFd ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE);
    if (fd == 0) {
      *error_p = ERROR_INVALID_HANDLE;
      return false;
    }
  }

  DWORD bytes_written_32;
  if (!WriteFile(fd, buff, buff_size, &bytes_written_32, 0)) {
    *error_p = GetLastError();
    return false;
  } else {
    *bytes_written = bytes_written_32;
    return true;
  }
}

bool RenameFile(const char *oldpath, const char *newpath, error_t *error_p) {
  UNIMPLEMENTED();
}

uptr internal_sched_yield() {
  Sleep(0);
  return 0;
}

void internal__exit(int exitcode) {
  // ExitProcess runs some finalizers, so use TerminateProcess to avoid that.
  // The debugger doesn't stop on TerminateProcess like it does on ExitProcess,
  // so add our own breakpoint here.
  if (::IsDebuggerPresent())
    __debugbreak();
  TerminateProcess(GetCurrentProcess(), exitcode);
  BUILTIN_UNREACHABLE();
}

uptr internal_ftruncate(fd_t fd, uptr size) {
  UNIMPLEMENTED();
}

uptr GetRSS() {
  return 0;
}

void *internal_start_thread(void (*func)(void *arg), void *arg) { return 0; }
void internal_join_thread(void *th) { }

// ---------------------- BlockingMutex ---------------- {{{1
const uptr LOCK_UNINITIALIZED = 0;
const uptr LOCK_READY = (uptr)-1;

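// owner_ doubles as an initialization flag: it is zero (LOCK_UNINITIALIZED)
// before the CRITICAL_SECTION is set up, LOCK_READY while the mutex is free,
// and the owning thread id while the mutex is held.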
BlockingMutex::BlockingMutex(LinkerInitialized li) {
  // FIXME: see comments in BlockingMutex::Lock() for the details.
  CHECK(li == LINKER_INITIALIZED || owner_ == LOCK_UNINITIALIZED);

  CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_));
  InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
  owner_ = LOCK_READY;
}

BlockingMutex::BlockingMutex() {
  CHECK(sizeof(CRITICAL_SECTION) <= sizeof(opaque_storage_));
  InitializeCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
  owner_ = LOCK_READY;
}

void BlockingMutex::Lock() {
  if (owner_ == LOCK_UNINITIALIZED) {
    // FIXME: hm, global BlockingMutex objects are not initialized?!?
    // This might be a side effect of the clang+cl+link Frankenbuild...
    new(this) BlockingMutex((LinkerInitialized)(LINKER_INITIALIZED + 1));

    // FIXME: If it turns out the linker doesn't invoke our
    // constructors, we should probably manually Lock/Unlock all the global
    // locks while we're starting in one thread to avoid double-init races.
  }
  EnterCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
  CHECK_EQ(owner_, LOCK_READY);
  owner_ = GetThreadSelf();
}

void BlockingMutex::Unlock() {
  CHECK_EQ(owner_, GetThreadSelf());
  owner_ = LOCK_READY;
  LeaveCriticalSection((LPCRITICAL_SECTION)opaque_storage_);
}

void BlockingMutex::CheckLocked() {
  CHECK_EQ(owner_, GetThreadSelf());
}

uptr GetTlsSize() {
  return 0;
}

void InitTlsSize() {
}

void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size) {
#if SANITIZER_GO
  *stk_addr = 0;
  *stk_size = 0;
  *tls_addr = 0;
  *tls_size = 0;
#else
  uptr stack_top, stack_bottom;
  GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
  *stk_addr = stack_bottom;
  *stk_size = stack_top - stack_bottom;
  *tls_addr = 0;
  *tls_size = 0;
#endif
}

#if !SANITIZER_GO
void BufferedStackTrace::SlowUnwindStack(uptr pc, u32 max_depth) {
  CHECK_GE(max_depth, 2);
  // FIXME: CaptureStackBackTrace might be too slow for us.
  // FIXME: Compare with StackWalk64.
  // FIXME: Look at LLVMUnhandledExceptionFilter in Signals.inc
  size = CaptureStackBackTrace(1, Min(max_depth, kStackTraceMax),
                               (void**)trace, 0);
  if (size == 0)
    return;

  // Skip the RTL frames by searching for the PC in the stacktrace.
  uptr pc_location = LocatePcInTrace(pc);
  PopStackFrames(pc_location);
}

void BufferedStackTrace::SlowUnwindStackWithContext(uptr pc, void *context,
                                                    u32 max_depth) {
  CONTEXT ctx = *(CONTEXT *)context;
  STACKFRAME64 stack_frame;
  memset(&stack_frame, 0, sizeof(stack_frame));

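  // StackWalk64 relies on dbghelp's symbol machinery (SymFunctionTableAccess64
  // and SymGetModuleBase64 below), so make sure dbghelp is initialized first.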
  InitializeDbgHelpIfNeeded();

  size = 0;
#if defined(_WIN64)
  int machine_type = IMAGE_FILE_MACHINE_AMD64;
  stack_frame.AddrPC.Offset = ctx.Rip;
  stack_frame.AddrFrame.Offset = ctx.Rbp;
  stack_frame.AddrStack.Offset = ctx.Rsp;
#else
  int machine_type = IMAGE_FILE_MACHINE_I386;
  stack_frame.AddrPC.Offset = ctx.Eip;
  stack_frame.AddrFrame.Offset = ctx.Ebp;
  stack_frame.AddrStack.Offset = ctx.Esp;
#endif
  stack_frame.AddrPC.Mode = AddrModeFlat;
  stack_frame.AddrFrame.Mode = AddrModeFlat;
  stack_frame.AddrStack.Mode = AddrModeFlat;
  while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(),
                     &stack_frame, &ctx, NULL, SymFunctionTableAccess64,
                     SymGetModuleBase64, NULL) &&
         size < Min(max_depth, kStackTraceMax)) {
    trace_buffer[size++] = (uptr)stack_frame.AddrPC.Offset;
  }
}
#endif  // #if !SANITIZER_GO

void ReportFile::Write(const char *buffer, uptr length) {
  SpinMutexLock l(mu);
  ReopenIfNecessary();
  if (!WriteToFile(fd, buffer, length)) {
    // stderr may be closed, but we may be able to print to the debugger
    // instead. This is the case when launching a program from Visual Studio,
    // and the following routine should write to its console.
    OutputDebugStringA(buffer);
  }
}

void SetAlternateSignalStack() {
  // FIXME: Decide what to do on Windows.
}

void UnsetAlternateSignalStack() {
  // FIXME: Decide what to do on Windows.
}

void InstallDeadlySignalHandlers(SignalHandlerType handler) {
  (void)handler;
  // FIXME: Decide what to do on Windows.
}

HandleSignalMode GetHandleSignalMode(int signum) {
  // FIXME: Decide what to do on Windows.
  return kHandleSignalNo;
}

// Check based on flags if we should handle this exception.
bool IsHandledDeadlyException(DWORD exceptionCode) {
  switch (exceptionCode) {
    case EXCEPTION_ACCESS_VIOLATION:
    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
    case EXCEPTION_STACK_OVERFLOW:
    case EXCEPTION_DATATYPE_MISALIGNMENT:
    case EXCEPTION_IN_PAGE_ERROR:
      return common_flags()->handle_segv;
    case EXCEPTION_ILLEGAL_INSTRUCTION:
    case EXCEPTION_PRIV_INSTRUCTION:
    case EXCEPTION_BREAKPOINT:
      return common_flags()->handle_sigill;
    case EXCEPTION_FLT_DENORMAL_OPERAND:
    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
    case EXCEPTION_FLT_INEXACT_RESULT:
    case EXCEPTION_FLT_INVALID_OPERATION:
    case EXCEPTION_FLT_OVERFLOW:
    case EXCEPTION_FLT_STACK_CHECK:
    case EXCEPTION_FLT_UNDERFLOW:
    case EXCEPTION_INT_DIVIDE_BY_ZERO:
    case EXCEPTION_INT_OVERFLOW:
      return common_flags()->handle_sigfpe;
  }
  return false;
}

const char *DescribeSignalOrException(int signo) {
  unsigned code = signo;
  // Get the string description of the exception if this is a known deadly
  // exception.
  switch (code) {
    case EXCEPTION_ACCESS_VIOLATION: return "access-violation";
    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED: return "array-bounds-exceeded";
    case EXCEPTION_STACK_OVERFLOW: return "stack-overflow";
    case EXCEPTION_DATATYPE_MISALIGNMENT: return "datatype-misalignment";
    case EXCEPTION_IN_PAGE_ERROR: return "in-page-error";
    case EXCEPTION_ILLEGAL_INSTRUCTION: return "illegal-instruction";
    case EXCEPTION_PRIV_INSTRUCTION: return "priv-instruction";
    case EXCEPTION_BREAKPOINT: return "breakpoint";
    case EXCEPTION_FLT_DENORMAL_OPERAND: return "flt-denormal-operand";
    case EXCEPTION_FLT_DIVIDE_BY_ZERO: return "flt-divide-by-zero";
    case EXCEPTION_FLT_INEXACT_RESULT: return "flt-inexact-result";
    case EXCEPTION_FLT_INVALID_OPERATION: return "flt-invalid-operation";
    case EXCEPTION_FLT_OVERFLOW: return "flt-overflow";
    case EXCEPTION_FLT_STACK_CHECK: return "flt-stack-check";
    case EXCEPTION_FLT_UNDERFLOW: return "flt-underflow";
    case EXCEPTION_INT_DIVIDE_BY_ZERO: return "int-divide-by-zero";
    case EXCEPTION_INT_OVERFLOW: return "int-overflow";
  }
  return "unknown exception";
}

bool IsAccessibleMemoryRange(uptr beg, uptr size) {
  SYSTEM_INFO si;
  GetNativeSystemInfo(&si);
  uptr page_size = si.dwPageSize;
  uptr page_mask = ~(page_size - 1);

  for (uptr page = beg & page_mask, end = (beg + size - 1) & page_mask;
       page <= end;) {
    MEMORY_BASIC_INFORMATION info;
    if (VirtualQuery((LPCVOID)page, &info, sizeof(info)) != sizeof(info))
      return false;

    if (info.Protect == 0 || info.Protect == PAGE_NOACCESS ||
        info.Protect == PAGE_EXECUTE)
      return false;

    if (info.RegionSize == 0)
      return false;

    page += info.RegionSize;
  }

  return true;
}

SignalContext SignalContext::Create(void *siginfo, void *context) {
  EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
  CONTEXT *context_record = (CONTEXT *)context;

  uptr pc = (uptr)exception_record->ExceptionAddress;
#ifdef _WIN64
  uptr bp = (uptr)context_record->Rbp;
  uptr sp = (uptr)context_record->Rsp;
#else
  uptr bp = (uptr)context_record->Ebp;
  uptr sp = (uptr)context_record->Esp;
#endif
  uptr access_addr = exception_record->ExceptionInformation[1];

  // The contents of this array are documented at
  // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363082(v=vs.85).aspx
  // The first element indicates read as 0, write as 1, or execute as 8. The
  // second element is the faulting address.
  WriteFlag write_flag = SignalContext::UNKNOWN;
  switch (exception_record->ExceptionInformation[0]) {
    case 0: write_flag = SignalContext::READ; break;
    case 1: write_flag = SignalContext::WRITE; break;
    case 8: write_flag = SignalContext::UNKNOWN; break;
  }
  bool is_memory_access = write_flag != SignalContext::UNKNOWN;
  return SignalContext(context, access_addr, pc, sp, bp, is_memory_access,
                       write_flag);
}

void SignalContext::DumpAllRegisters(void *context) {
  // FIXME: Implement this.
}

uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
  // FIXME: Actually implement this function.
  CHECK_GT(buf_len, 0);
  buf[0] = 0;
  return 0;
}

uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
  return ReadBinaryName(buf, buf_len);
}

void CheckVMASize() {
  // Do nothing.
}

void MaybeReexec() {
  // No need to re-exec on Windows.
}

char **GetArgv() {
  // FIXME: Actually implement this function.
  return 0;
}

pid_t StartSubprocess(const char *program, const char *const argv[],
                      fd_t stdin_fd, fd_t stdout_fd, fd_t stderr_fd) {
  // FIXME: implement on this platform
  // Should be implemented based on
  // SymbolizerProcess::StartSymbolizerSubprocess
  // from lib/sanitizer_common/sanitizer_symbolizer_win.cc.
  return -1;
}

bool IsProcessRunning(pid_t pid) {
  // FIXME: implement on this platform.
  return false;
}

int WaitForProcess(pid_t pid) { return -1; }

// FIXME implement on this platform.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }

void CheckNoDeepBind(const char *filename, int flag) {
  // Do nothing.
}

// FIXME: implement on this platform.
bool GetRandom(void *buffer, uptr length, bool blocking) {
  UNIMPLEMENTED();
}

}  // namespace __sanitizer

#endif  // SANITIZER_WINDOWS