//===-- sanitizer_posix.cc ------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is shared between AddressSanitizer and ThreadSanitizer
// run-time libraries and implements POSIX-specific functions from
// sanitizer_libc.h.
//===----------------------------------------------------------------------===//

#include "sanitizer_platform.h"
#if SANITIZER_LINUX || SANITIZER_MAC

#include "sanitizer_common.h"
#include "sanitizer_libc.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stacktrace.h"

#include <sys/mman.h>

namespace __sanitizer {

// ------------- sanitizer_common.h
uptr GetMmapGranularity() {
  return GetPageSize();
}

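// Upper bound of the user-space virtual address range for the current target
// (depends on the word size and, on 64-bit, the architecture).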
uptr GetMaxVirtualAddress() {
#if SANITIZER_WORDSIZE == 64
# if defined(__powerpc64__)
  // On PowerPC64 we have two different address space layouts: 44- and 46-bit.
  // We somehow need to figure out which one we are using now and choose
  // one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
  // Note that with 'ulimit -s unlimited' the stack is moved away from the top
  // of the address space, so simply checking the stack address is not enough.
  return (1ULL << 44) - 1;  // 0x00000fffffffffffUL
# else
  return (1ULL << 47) - 1;  // 0x00007fffffffffffUL;
# endif
#else  // SANITIZER_WORDSIZE == 32
  // FIXME: We can probably lower this on Android?
  return (1ULL << 32) - 1;  // 0xffffffff;
#endif  // SANITIZER_WORDSIZE
}

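// Anonymous mmap of |size| bytes (rounded up to the page size); reports the
// error and dies if the mapping fails. |mem_type| is only used in the error
// message.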
void *MmapOrDie(uptr size, const char *mem_type) {
  size = RoundUpTo(size, GetPageSizeCached());
  uptr res = internal_mmap(0, size,
                           PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANON, -1, 0);
  int reserrno;
  if (internal_iserror(res, &reserrno)) {
    static int recursion_count;
    if (recursion_count) {
      // The Report() and CHECK calls below may call mmap recursively and fail.
      // If we went into recursion, just die.
      RawWrite("ERROR: Failed to mmap\n");
      Die();
    }
    recursion_count++;
    Report("ERROR: %s failed to allocate 0x%zx (%zd) bytes of %s: %d\n",
           SanitizerToolName, size, size, mem_type, reserrno);
    DumpProcessMap();
    CHECK("unable to mmap" && 0);
  }
  return (void *)res;
}

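// Unmaps a region previously obtained from one of the Mmap* functions above;
// dies if munmap fails.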
void UnmapOrDie(void *addr, uptr size) {
  if (!addr || !size) return;
  uptr res = internal_munmap(addr, size);
  if (internal_iserror(res)) {
    Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
           SanitizerToolName, size, size, addr);
    CHECK("unable to unmap" && 0);
  }
}

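// Maps |size| bytes at the page-aligned |fixed_addr| with MAP_NORESERVE.
// On failure only reports the error and still returns the (failed) mmap
// result; the caller is expected to check it.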
void *MmapFixedNoReserve(uptr fixed_addr, uptr size) {
  uptr PageSize = GetPageSizeCached();
  uptr p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
                         RoundUpTo(size, PageSize),
                         PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED | MAP_NORESERVE,
                         -1, 0);
  int reserrno;
  if (internal_iserror(p, &reserrno))
    Report("ERROR: "
           "%s failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n",
           SanitizerToolName, size, size, fixed_addr, reserrno);
  return (void *)p;
}

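// Same as MmapFixedNoReserve, but without MAP_NORESERVE, and dies (via CHECK)
// if the fixed mapping cannot be established.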
void *MmapFixedOrDie(uptr fixed_addr, uptr size) {
  uptr PageSize = GetPageSizeCached();
  uptr p = internal_mmap((void*)(fixed_addr & ~(PageSize - 1)),
                         RoundUpTo(size, PageSize),
                         PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                         -1, 0);
  int reserrno;
  if (internal_iserror(p, &reserrno)) {
    Report("ERROR:"
           " %s failed to allocate 0x%zx (%zd) bytes at address %p (%d)\n",
           SanitizerToolName, size, size, fixed_addr, reserrno);
    CHECK("unable to mmap" && 0);
  }
  return (void *)p;
}

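// Maps an inaccessible (PROT_NONE) region of |size| bytes at |fixed_addr|.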
void *Mprotect(uptr fixed_addr, uptr size) {
  return (void *)internal_mmap((void*)fixed_addr, size,
                               PROT_NONE,
                               MAP_PRIVATE | MAP_ANON | MAP_FIXED |
                               MAP_NORESERVE, -1, 0);
}

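// Maps the whole file read-only; *buff_size receives the mapping size
// (file size rounded up to the page size). Returns 0 if mmap fails.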
void *MapFileToMemory(const char *file_name, uptr *buff_size) {
  uptr openrv = OpenFile(file_name, false);
  CHECK(!internal_iserror(openrv));
  fd_t fd = openrv;
  uptr fsize = internal_filesize(fd);
  CHECK_NE(fsize, (uptr)-1);
  CHECK_GT(fsize, 0);
  *buff_size = RoundUpTo(fsize, GetPageSizeCached());
  uptr map = internal_mmap(0, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0);
  return internal_iserror(map) ? 0 : (void *)map;
}

static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
                                        uptr start2, uptr end2) {
  CHECK(start1 <= end1);
  CHECK(start2 <= end2);
  return (end1 < start2) || (end2 < start1);
}

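// Returns true if [range_start, range_end] does not intersect any existing
// mapping of the current process.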
// FIXME: this is thread-unsafe, but should not cause problems most of the time.
// When the shadow is mapped only a single thread usually exists (plus maybe
// several worker threads on Mac, which aren't expected to map big chunks of
// memory).
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr start, end;
  while (proc_maps.Next(&start, &end,
                        /*offset*/0, /*filename*/0, /*filename_size*/0,
                        /*protection*/0)) {
    if (!IntervalsAreSeparate(start, end, range_start, range_end))
      return false;
  }
  return true;
}

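// Prints every mapping of the current process (start-end addresses and the
// backing file name, if any).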
void DumpProcessMap() {
  MemoryMappingLayout proc_maps(/*cache_enabled*/true);
  uptr start, end;
  const sptr kBufSize = 4095;
  char *filename = (char*)MmapOrDie(kBufSize, __FUNCTION__);
  Report("Process memory map follows:\n");
  while (proc_maps.Next(&start, &end, /* file_offset */0,
                        filename, kBufSize, /* protection */0)) {
    Printf("\t%p-%p\t%s\n", (void*)start, (void*)end, filename);
  }
  Report("End of process memory map.\n");
  UnmapOrDie(filename, kBufSize);
}

const char *GetPwd() {
  return GetEnv("PWD");
}

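// Searches each directory listed in $PATH for |name|; returns a strdup'ed
// full path to the first match, or 0 if the binary is not found.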
char *FindPathToBinary(const char *name) {
  const char *path = GetEnv("PATH");
  if (!path)
    return 0;
  uptr name_len = internal_strlen(name);
  InternalScopedBuffer<char> buffer(kMaxPathLength);
  const char *beg = path;
  while (true) {
    const char *end = internal_strchrnul(beg, ':');
    uptr prefix_len = end - beg;
    if (prefix_len + name_len + 2 <= kMaxPathLength) {
      internal_memcpy(buffer.data(), beg, prefix_len);
      buffer[prefix_len] = '/';
      internal_memcpy(&buffer[prefix_len + 1], name, name_len);
      buffer[prefix_len + 1 + name_len] = '\0';
      if (FileExists(buffer.data()))
        return internal_strdup(buffer.data());
    }
    if (*end == '\0') break;
    beg = end + 1;
  }
  return 0;
}

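// (Re)opens '<report_path_prefix>.<pid>' as the report file when logging to a
// file is enabled and the current process does not own report_fd yet (e.g.
// after fork); if the file cannot be opened, reports to stderr and dies.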
void MaybeOpenReportFile() {
  if (!log_to_file || (report_fd_pid == internal_getpid())) return;
  InternalScopedBuffer<char> report_path_full(4096);
  internal_snprintf(report_path_full.data(), report_path_full.size(),
                    "%s.%d", report_path_prefix, internal_getpid());
  uptr openrv = OpenFile(report_path_full.data(), true);
  if (internal_iserror(openrv)) {
    report_fd = kStderrFd;
    log_to_file = false;
    Report("ERROR: Can't open file: %s\n", report_path_full.data());
    Die();
  }
  if (report_fd != kInvalidFd) {
    // We're in the child. Close the parent's log.
    internal_close(report_fd);
  }
  report_fd = openrv;
  report_fd_pid = internal_getpid();
}

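// Writes |buffer| to the report file descriptor with no formatting or
// buffering; dies if the write comes up short.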
void RawWrite(const char *buffer) {
  static const char *kRawWriteError =
      "RawWrite can't output requested buffer!\n";
  uptr length = (uptr)internal_strlen(buffer);
  MaybeOpenReportFile();
  if (length != internal_write(report_fd, buffer, length)) {
    internal_write(report_fd, kRawWriteError, internal_strlen(kRawWriteError));
    Die();
  }
}

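// Looks for an executable mapping whose file name matches |module| exactly
// and reports its [*start, *end) address range; returns false if no such
// mapping exists.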
bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) {
  uptr s, e, off, prot;
  InternalMmapVector<char> fn(4096);
  fn.push_back(0);
  MemoryMappingLayout proc_maps(/*cache_enabled*/false);
  while (proc_maps.Next(&s, &e, &off, &fn[0], fn.capacity(), &prot)) {
    if ((prot & MemoryMappingLayout::kProtectionExecute) != 0
        && internal_strcmp(module, &fn[0]) == 0) {
      *start = s;
      *end = e;
      return true;
    }
  }
  return false;
}

}  // namespace __sanitizer

#endif  // SANITIZER_LINUX || SANITIZER_MAC