//===-- sanitizer_posix.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements POSIX-specific functions from
// sanitizer_posix.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"
#if SANITIZER_POSIX

#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_posix.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"

#include <fcntl.h>
#include <signal.h>
#include <sys/mman.h>

#if SANITIZER_LINUX
#include <sys/utsname.h>
#endif

#if SANITIZER_LINUX && !SANITIZER_ANDROID
#include <sys/personality.h>
#endif

#if SANITIZER_FREEBSD
// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
// that, it was never implemented.  So just define it to zero.
#undef MAP_NORESERVE
#define MAP_NORESERVE 0
#endif

namespace __sanitizer {

// ------------- sanitizer_common.h
uptr GetMmapGranularity() {
  return GetPageSize();
}

#if SANITIZER_WORDSIZE == 32
// Take care of unusable kernel area in top gigabyte.
static uptr GetKernelAreaSize() {
#if SANITIZER_LINUX && !SANITIZER_X32
  const uptr gbyte = 1UL << 30;

  // First, check whether there are writable segments
  // mapped into the top gigabyte (e.g. the stack).
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr end, prot;
  while (proc_maps.Next(/*start*/0, &end,
                        /*offset*/0, /*filename*/0,
                        /*filename_size*/0, &prot)) {
    if ((end >= 3 * gbyte)
        && (prot & MemoryMappingLayout::kProtectionWrite) != 0)
      return 0;
  }

#if !SANITIZER_ANDROID
  // Even if nothing is mapped there, the top gigabyte may still be accessible
  // if we are running on a 64-bit kernel.
  // Uname may report misleading results if the personality type
  // has been modified (e.g. under schroot), so check this as well.
  struct utsname uname_info;
  int pers = personality(0xffffffffUL);
  if (!(pers & PER_MASK)
      && uname(&uname_info) == 0
      && internal_strstr(uname_info.machine, "64"))
    return 0;
#endif  // SANITIZER_ANDROID

  // The top gigabyte is reserved for the kernel.
  return gbyte;
#else
  return 0;
#endif  // SANITIZER_LINUX && !SANITIZER_X32
}
#endif  // SANITIZER_WORDSIZE == 32

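// Returns the highest usable virtual address for the current process.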
uptr GetMaxVirtualAddress() {
#if SANITIZER_WORDSIZE == 64
# if defined(__powerpc64__) || defined(__aarch64__)
  // On PowerPC64 we have two different address space layouts: 44- and 46-bit.
  // We somehow need to figure out which one we are using now and choose
  // one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
  // Note that with 'ulimit -s unlimited' the stack is moved away from the top
  // of the address space, so simply checking the stack address is not enough.
  // This should (does) work for both PowerPC64 Endian modes.
  // Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit.
  return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
# elif defined(__mips64)
  return (1ULL << 40) - 1;  // 0x000000ffffffffffUL;
# else
  return (1ULL << 47) - 1;  // 0x00007fffffffffffUL;
# endif
#else  // SANITIZER_WORDSIZE == 32
  uptr res = (1ULL << 32) - 1;  // 0xffffffff;
  if (!common_flags()->full_address_space)
    res -= GetKernelAreaSize();
  CHECK_LT(reinterpret_cast<uptr>(&res), res);
  return res;
#endif  // SANITIZER_WORDSIZE
}

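// Maps page-aligned anonymous read/write memory; reports the error and dies
// if the mapping fails.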
void *MmapOrDie(uptr size, const char *mem_type) {
  size = RoundUpTo(size, GetPageSizeCached());
  uptr res = internal_mmap(0, size,
                           PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANON, -1, 0);
  int reserrno;
  if (internal_iserror(res, &reserrno)) {
    static int recursion_count;
    if (recursion_count) {
      // The Report() and CHECK calls below may call mmap recursively and fail.
      // If we went into recursion, just die.
      RawWrite("ERROR: Failed to mmap\n");
      Die();
    }
    recursion_count++;
    Report("ERROR: %s failed to "
           "allocate 0x%zx (%zd) bytes of %s (errno: %d)\n",
           SanitizerToolName, size, size, mem_type, reserrno);
    DumpProcessMap();
    CHECK("unable to mmap" && 0);
  }
  IncreaseTotalMmap(size);
  return (void *)res;
}

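// Unmaps [addr, addr + size); dies if munmap fails.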
void UnmapOrDie(void *addr, uptr size) {
  if (!addr || !size) return;
  uptr res = internal_munmap(addr, size);
  if (internal_iserror(res)) {
    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, size, size, addr);
    CHECK("unable to unmap" && 0);
  }
  DecreaseTotalMmap(size);
}

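// Like MmapOrDie, but passes MAP_NORESERVE so that no swap space is reserved
// up front for the mapping.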
void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
  uptr PageSize = GetPageSizeCached();
  uptr p = internal_mmap(0,
                         RoundUpTo(size, PageSize),
                         PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                         -1, 0);
  int reserrno;
  if (internal_iserror(p, &reserrno)) {
    Report("ERROR: %s failed to "
           "allocate noreserve 0x%zx (%zd) bytes for '%s' (errno: %d)\n",
           SanitizerToolName, size, size, mem_type, reserrno);
    CHECK("unable to mmap" && 0);
  }
  IncreaseTotalMmap(size);
  return (void *)p;
}

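// Maps memory at a fixed, page-aligned address (MAP_FIXED); dies on failure.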
void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
  uptr PageSize = GetPageSizeCached();
  uptr p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
                         RoundUpTo(size, PageSize),
                         PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                         -1, 0);
  int reserrno;
  if (internal_iserror(p, &reserrno)) {
    Report("ERROR: %s failed to "
           "allocate 0x%zx (%zd) bytes at address %zx (errno: %d)\n",
           SanitizerToolName, size, size, fixed_addr, reserrno);
    CHECK("unable to mmap" && 0);
  }
  IncreaseTotalMmap(size);
  return (void *)p;
}

bool MprotectNoAccess(uptr addr, uptr size) {
  return 0 == internal_mprotect((void*)addr, size, PROT_NONE);
}

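// Opens the file in the requested mode (creating it for the writable modes);
// returns kInvalidFd and reports the error via errno_p on failure.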
fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *errno_p) {
  int flags;
  switch (mode) {
    case RdOnly: flags = O_RDONLY; break;
    case WrOnly: flags = O_WRONLY | O_CREAT; break;
    case RdWr: flags = O_RDWR | O_CREAT; break;
  }
  fd_t res = internal_open(filename, flags, 0660);
  if (internal_iserror(res, errno_p))
    return kInvalidFd;
  return res;
}

void CloseFile(fd_t fd) {
  internal_close(fd);
}

bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
                  error_t *error_p) {
  uptr res = internal_read(fd, buff, buff_size);
  if (internal_iserror(res, error_p))
    return false;
  if (bytes_read)
    *bytes_read = res;
  return true;
}

bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
                 error_t *error_p) {
  uptr res = internal_write(fd, buff, buff_size);
  if (internal_iserror(res, error_p))
    return false;
  if (bytes_written)
    *bytes_written = res;
  return true;
}

bool RenameFile(const char *oldpath, const char *newpath, error_t *error_p) {
  uptr res = internal_rename(oldpath, newpath);
  return !internal_iserror(res, error_p);
}

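// Maps an entire file read-only into memory; *buff_size receives the mapping
// size rounded up to the page size. Returns 0 if mmap fails.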
void *MapFileToMemory(const char *file_name, uptr *buff_size) {
  fd_t fd = OpenFile(file_name, RdOnly);
  CHECK(fd != kInvalidFd);
  uptr fsize = internal_filesize(fd);
  CHECK_NE(fsize, (uptr)-1);
  CHECK_GT(fsize, 0);
  *buff_size = RoundUpTo(fsize, GetPageSizeCached());
  uptr map = internal_mmap(0, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0);
  return internal_iserror(map) ? 0 : (void *)map;
}

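// Maps a shared, writable view of the file at the given offset, optionally at
// a fixed address; logs the error and returns 0 on failure.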
void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, uptr offset) {
  uptr flags = MAP_SHARED;
  if (addr) flags |= MAP_FIXED;
  uptr p = internal_mmap(addr, size, PROT_READ | PROT_WRITE, flags, fd, offset);
  int mmap_errno = 0;
  if (internal_iserror(p, &mmap_errno)) {
    Printf("could not map writable file (%d, %zu, %zu): %zd, errno: %d\n",
           fd, offset, size, p, mmap_errno);
    return 0;
  }
  return (void *)p;
}

static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
                                        uptr start2, uptr end2) {
  CHECK(start1 <= end1);
  CHECK(start2 <= end2);
  return (end1 < start2) || (end2 < start1);
}

// FIXME: this is thread-unsafe, but should not cause problems most of the time.
// When the shadow is mapped only a single thread usually exists (plus maybe
// several worker threads on Mac, which aren't expected to map big chunks of
// memory).
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr start, end;
  while (proc_maps.Next(&start, &end,
                        /*offset*/0, /*filename*/0, /*filename_size*/0,
                        /*protection*/0)) {
    CHECK_NE(0, end);
    if (!IntervalsAreSeparate(start, end - 1, range_start, range_end))
      return false;
  }
  return true;
}

void DumpProcessMap() {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr start, end;
  const sptr kBufSize = 4095;
  char *filename = (char*)MmapOrDie(kBufSize, __func__);
  Report("Process memory map follows:\n");
  while (proc_maps.Next(&start, &end, /* file_offset */0,
                        filename, kBufSize, /* protection */0)) {
    Printf("\t%p-%p\t%s\n", (void*)start, (void*)end, filename);
  }
  Report("End of process memory map.\n");
  UnmapOrDie(filename, kBufSize);
}

const char *GetPwd() {
  return GetEnv("PWD");
}

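// Searches each directory in $PATH for 'name' and returns a heap copy of the
// first existing full path, or 0 if none is found.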
char *FindPathToBinary(const char *name) {
  const char *path = GetEnv("PATH");
  if (!path)
    return 0;
  uptr name_len = internal_strlen(name);
  InternalScopedBuffer<char> buffer(kMaxPathLength);
  const char *beg = path;
  while (true) {
    const char *end = internal_strchrnul(beg, ':');
    uptr prefix_len = end - beg;
    if (prefix_len + name_len + 2 <= kMaxPathLength) {
      internal_memcpy(buffer.data(), beg, prefix_len);
      buffer[prefix_len] = '/';
      internal_memcpy(&buffer[prefix_len + 1], name, name_len);
      buffer[prefix_len + 1 + name_len] = '\0';
      if (FileExists(buffer.data()))
        return internal_strdup(buffer.data());
    }
    if (*end == '\0') break;
    beg = end + 1;
  }
  return 0;
}

bool IsPathSeparator(const char c) {
  return c == '/';
}

bool IsAbsolutePath(const char *path) {
  return path != nullptr && IsPathSeparator(path[0]);
}

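// Writes the buffer to the report file under the spin mutex, reopening the
// file first if necessary; dies if the write comes up short.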
void ReportFile::Write(const char *buffer, uptr length) {
  SpinMutexLock l(mu);
  static const char *kWriteError =
      "ReportFile::Write() can't output requested buffer!\n";
  ReopenIfNecessary();
  if (length != internal_write(fd, buffer, length)) {
    internal_write(fd, kWriteError, internal_strlen(kWriteError));
    Die();
  }
}

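// Finds the executable segment of 'module' in the process memory map and
// reports its address range via *start and *end.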
bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) {
  uptr s, e, off, prot;
  InternalScopedString buff(kMaxPathLength);
  MemoryMappingLayout proc_maps(/*cache_enabled*/false);
  while (proc_maps.Next(&s, &e, &off, buff.data(), buff.size(), &prot)) {
    if ((prot & MemoryMappingLayout::kProtectionExecute) != 0
        && internal_strcmp(module, buff.data()) == 0) {
      *start = s;
      *end = e;
      return true;
    }
  }
  return false;
}

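// Builds a SignalContext from the raw siginfo/context pair, extracting the
// faulting address and the pc/sp/bp registers.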
SignalContext SignalContext::Create(void *siginfo, void *context) {
  uptr addr = (uptr)((siginfo_t*)siginfo)->si_addr;
  uptr pc, sp, bp;
  GetPcSpBp(context, &pc, &sp, &bp);
  return SignalContext(context, addr, pc, sp, bp);
}

}  // namespace __sanitizer

#endif  // SANITIZER_POSIX