blob: 6c1c86765c82342fadd5682c6a07c117e161ac4e [file] [log] [blame]
Brian Carlstrom27ec9612011-09-19 20:20:38 -07001/*
2 * Copyright (C) 2008 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "mem_map.h"
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -070018#include "thread-inl.h"
Brian Carlstrom27ec9612011-09-19 20:20:38 -070019
Ian Rogersdebeb3a2014-01-23 16:54:52 -080020#include <inttypes.h>
Christopher Ferris943af7d2014-01-16 12:41:46 -080021#include <backtrace/BacktraceMap.h>
Ian Rogers700a4022014-05-19 16:49:03 -070022#include <memory>
Elliott Hughesecd3a6f2012-06-06 18:16:37 -070023
Andreas Gamped8f26db2014-05-19 17:01:13 -070024// See CreateStartPos below.
25#ifdef __BIONIC__
26#include <sys/auxv.h>
27#endif
28
Elliott Hughese222ee02012-12-13 14:41:43 -080029#include "base/stringprintf.h"
30#include "ScopedFd.h"
31#include "utils.h"
32
Elliott Hughes6c9c06d2011-11-07 16:43:47 -080033#define USE_ASHMEM 1
34
35#ifdef USE_ASHMEM
36#include <cutils/ashmem.h>
Ian Rogers997f0f92014-06-21 22:58:05 -070037#ifndef ANDROID_OS
38#include <sys/resource.h>
39#endif
Elliott Hughes6c9c06d2011-11-07 16:43:47 -080040#endif
41
Brian Carlstrom27ec9612011-09-19 20:20:38 -070042namespace art {
43
Christopher Ferris943af7d2014-01-16 12:41:46 -080044static std::ostream& operator<<(
45 std::ostream& os,
46 std::pair<BacktraceMap::const_iterator, BacktraceMap::const_iterator> iters) {
47 for (BacktraceMap::const_iterator it = iters.first; it != iters.second; ++it) {
48 os << StringPrintf("0x%08x-0x%08x %c%c%c %s\n",
49 static_cast<uint32_t>(it->start),
50 static_cast<uint32_t>(it->end),
51 (it->flags & PROT_READ) ? 'r' : '-',
52 (it->flags & PROT_WRITE) ? 'w' : '-',
53 (it->flags & PROT_EXEC) ? 'x' : '-', it->name.c_str());
Elliott Hughesecd3a6f2012-06-06 18:16:37 -070054 }
55 return os;
Brian Carlstrom27ec9612011-09-19 20:20:38 -070056}
57
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -070058std::ostream& operator<<(std::ostream& os, const std::multimap<void*, MemMap*>& mem_maps) {
59 os << "MemMap:" << std::endl;
60 for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
61 void* base = it->first;
62 MemMap* map = it->second;
63 CHECK_EQ(base, map->BaseBegin());
64 os << *map << std::endl;
65 }
66 return os;
67}
68
69std::multimap<void*, MemMap*> MemMap::maps_;
70
#if defined(__LP64__) && !defined(__x86_64__)
// Handling mem_map in 32b address range for 64b architectures that do not support MAP_32BIT.

// The regular start of memory allocations. The first 64KB is protected by SELinux.
static constexpr uintptr_t LOW_MEM_START = 64 * KB;

// Generate random starting position.
// To not interfere with image position, take the image's address and only place it below. Current
// formula (sketch):
//
// ART_BASE_ADDR      = 0001XXXXXXXXXXXXXXX
// ----------------------------------------
//                    = 0000111111111111111
// & ~(kPageSize - 1) =~0000000000000001111
// ----------------------------------------
// mask               = 0000111111111110000
// & random data      = YYYYYYYYYYYYYYYYYYY
// -----------------------------------
// tmp                = 0000YYYYYYYYYYY0000
// + LOW_MEM_START    = 0000000000001000000
// --------------------------------------
// start
//
// getauxval as an entropy source is exposed in Bionic, but not in glibc before 2.16. When we
// do not have Bionic, simply start with LOW_MEM_START.

// Function is standalone so it can be tested somewhat in mem_map_test.cc.
#ifdef __BIONIC__
uintptr_t CreateStartPos(uint64_t input) {
  CHECK_NE(0, ART_BASE_ADDRESS);

  // Start with all bits below highest bit in ART_BASE_ADDRESS.
  constexpr size_t leading_zeros = CLZ(static_cast<uint32_t>(ART_BASE_ADDRESS));
  constexpr uintptr_t mask_ones = (1 << (31 - leading_zeros)) - 1;

  // Lowest (usually 12) bits are not used, as aligned by page size.
  constexpr uintptr_t mask = mask_ones & ~(kPageSize - 1);

  // Mask input data, then bias above the SELinux-protected low 64KB.
  return (input & mask) + LOW_MEM_START;
}
#endif

// Picks the initial position for the low-4GB linear scan in MapAnonymous.
static uintptr_t GenerateNextMemPos() {
#ifdef __BIONIC__
  uint8_t* random_data = reinterpret_cast<uint8_t*>(getauxval(AT_RANDOM));
  // The lower 8B are taken for the stack guard. Use the upper 8B (with mask).
  return CreateStartPos(*reinterpret_cast<uintptr_t*>(random_data + 8));
#else
  // No auxv on host, see above.
  return LOW_MEM_START;
#endif
}

// Initialize linear scan to random position.
uintptr_t MemMap::next_mem_pos_ = GenerateNextMemPos();
#endif
128
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700129static bool CheckMapRequest(byte* expected_ptr, void* actual_ptr, size_t byte_count,
130 std::ostringstream* error_msg) {
131 // Handled first by caller for more specific error messages.
132 CHECK(actual_ptr != MAP_FAILED);
133
134 if (expected_ptr == nullptr) {
135 return true;
Brian Carlstrom27ec9612011-09-19 20:20:38 -0700136 }
Elliott Hughesecd3a6f2012-06-06 18:16:37 -0700137
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700138 if (expected_ptr == actual_ptr) {
139 return true;
140 }
141
142 // We asked for an address but didn't get what we wanted, all paths below here should fail.
143 int result = munmap(actual_ptr, byte_count);
144 if (result == -1) {
145 PLOG(WARNING) << StringPrintf("munmap(%p, %zd) failed", actual_ptr, byte_count);
146 }
147
148 uintptr_t actual = reinterpret_cast<uintptr_t>(actual_ptr);
149 uintptr_t expected = reinterpret_cast<uintptr_t>(expected_ptr);
150 uintptr_t limit = expected + byte_count;
Brian Carlstrom27ec9612011-09-19 20:20:38 -0700151
Ian Rogers700a4022014-05-19 16:49:03 -0700152 std::unique_ptr<BacktraceMap> map(BacktraceMap::Create(getpid()));
Christopher Ferriscaf22ac2014-01-27 18:32:14 -0800153 if (!map->Build()) {
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700154 *error_msg << StringPrintf("Failed to build process map to determine why mmap returned "
155 "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
156
157 return false;
Christopher Ferris943af7d2014-01-16 12:41:46 -0800158 }
Christopher Ferriscaf22ac2014-01-27 18:32:14 -0800159 for (BacktraceMap::const_iterator it = map->begin(); it != map->end(); ++it) {
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700160 if ((expected >= it->start && expected < it->end) // start of new within old
161 || (limit > it->start && limit < it->end) // end of new within old
162 || (expected <= it->start && limit > it->end)) { // start/end of new includes all of old
163 *error_msg
164 << StringPrintf("Requested region 0x%08" PRIxPTR "-0x%08" PRIxPTR " overlaps with "
165 "existing map 0x%08" PRIxPTR "-0x%08" PRIxPTR " (%s)\n",
166 expected, limit,
167 static_cast<uintptr_t>(it->start), static_cast<uintptr_t>(it->end),
168 it->name.c_str())
169 << std::make_pair(it, map->end());
170 return false;
171 }
Elliott Hughes96970cd2012-03-13 15:46:31 -0700172 }
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700173 *error_msg << StringPrintf("Failed to mmap at expected address, mapped at "
174 "0x%08" PRIxPTR " instead of 0x%08" PRIxPTR, actual, expected);
175 return false;
Brian Carlstrom27ec9612011-09-19 20:20:38 -0700176}
177
// Creates an anonymous (non-file-backed) mapping of byte_count bytes with the
// given protection. If 'expected' is non-null the mapping must land exactly
// there (checked via CheckMapRequest). If 'low_4gb' is set on 64-bit targets,
// the mapping is forced below 4GB — via MAP_32BIT on x86_64, or via a linear
// msync-probe scan elsewhere. Returns nullptr and fills *error_msg on failure.
MemMap* MemMap::MapAnonymous(const char* name, byte* expected, size_t byte_count, int prot,
                             bool low_4gb, std::string* error_msg) {
  if (byte_count == 0) {
    // Empty map: no pages are reserved; MemMap ctor accepts the all-null form.
    return new MemMap(name, nullptr, 0, nullptr, 0, prot);
  }
  size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);

  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
  ScopedFd fd(-1);

#ifdef USE_ASHMEM
#ifdef HAVE_ANDROID_OS
  const bool use_ashmem = true;
#else
  // When not on Android ashmem is faked using files in /tmp. Ensure that such files won't
  // fail due to ulimit restrictions. If they will then use a regular mmap.
  struct rlimit rlimit_fsize;
  CHECK_EQ(getrlimit(RLIMIT_FSIZE, &rlimit_fsize), 0);
  const bool use_ashmem = (rlimit_fsize.rlim_cur == RLIM_INFINITY) ||
      (page_aligned_byte_count < rlimit_fsize.rlim_cur);
#endif
  if (use_ashmem) {
    // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
    // prefixed "dalvik-".
    std::string debug_friendly_name("dalvik-");
    debug_friendly_name += name;
    fd.reset(ashmem_create_region(debug_friendly_name.c_str(), page_aligned_byte_count));
    if (fd.get() == -1) {
      *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s", name, strerror(errno));
      return nullptr;
    }
    // Backed by the ashmem fd now, so drop MAP_ANONYMOUS.
    flags = MAP_PRIVATE;
  }
#endif

  // We need to store and potentially set an error number for pretty printing of errors
  int saved_errno = 0;

#ifdef __LP64__
  // When requesting low_4g memory and having an expectation, the requested range should fit into
  // 4GB.
  if (low_4gb && (
      // Start out of bounds.
      (reinterpret_cast<uintptr_t>(expected) >> 32) != 0 ||
      // End out of bounds. For simplicity, this will fail for the last page of memory.
      (reinterpret_cast<uintptr_t>(expected + page_aligned_byte_count) >> 32) != 0)) {
    *error_msg = StringPrintf("The requested address space (%p, %p) cannot fit in low_4gb",
                              expected, expected + page_aligned_byte_count);
    return nullptr;
  }
#endif

  // TODO:
  // A page allocator would be a useful abstraction here, as
  // 1) It is doubtful that MAP_32BIT on x86_64 is doing the right job for us
  // 2) The linear scheme, even with simple saving of the last known position, is very crude
#if defined(__LP64__) && !defined(__x86_64__)
  // MAP_32BIT only available on x86_64.
  void* actual = MAP_FAILED;
  if (low_4gb && expected == nullptr) {
    // Linear scan below 4GB: resume from next_mem_pos_ and, if we run out of
    // room before 4GB, wrap around once to LOW_MEM_START.
    bool first_run = true;

    for (uintptr_t ptr = next_mem_pos_; ptr < 4 * GB; ptr += kPageSize) {
      if (4U * GB - ptr < page_aligned_byte_count) {
        // Not enough memory until 4GB.
        if (first_run) {
          // Try another time from the bottom;
          // (minus kPageSize so the loop's increment lands exactly on LOW_MEM_START).
          ptr = LOW_MEM_START - kPageSize;
          first_run = false;
          continue;
        } else {
          // Second try failed.
          break;
        }
      }

      uintptr_t tail_ptr;

      // Check pages are free: msync on an unmapped page fails with ENOMEM.
      bool safe = true;
      for (tail_ptr = ptr; tail_ptr < ptr + page_aligned_byte_count; tail_ptr += kPageSize) {
        if (msync(reinterpret_cast<void*>(tail_ptr), kPageSize, 0) == 0) {
          safe = false;
          break;
        } else {
          DCHECK_EQ(errno, ENOMEM);
        }
      }

      next_mem_pos_ = tail_ptr;  // update early, as we break out when we found and mapped a region

      if (safe == true) {
        actual = mmap(reinterpret_cast<void*>(ptr), page_aligned_byte_count, prot, flags, fd.get(),
                      0);
        if (actual != MAP_FAILED) {
          // Since we didn't use MAP_FIXED the kernel may have mapped it somewhere not in the low
          // 4GB. If this is the case, unmap and retry.
          if (reinterpret_cast<uintptr_t>(actual) + page_aligned_byte_count < 4 * GB) {
            break;
          } else {
            munmap(actual, page_aligned_byte_count);
            actual = MAP_FAILED;
          }
        }
      } else {
        // Skip over last page.
        ptr = tail_ptr;
      }
    }

    if (actual == MAP_FAILED) {
      LOG(ERROR) << "Could not find contiguous low-memory space.";
      saved_errno = ENOMEM;
    }
  } else {
    actual = mmap(expected, page_aligned_byte_count, prot, flags, fd.get(), 0);
    saved_errno = errno;
  }

#else
#ifdef __x86_64__
  // On x86_64 the kernel honours MAP_32BIT, so no manual scan is needed.
  if (low_4gb && expected == nullptr) {
    flags |= MAP_32BIT;
  }
#endif

  void* actual = mmap(expected, page_aligned_byte_count, prot, flags, fd.get(), 0);
  saved_errno = errno;
#endif

  if (actual == MAP_FAILED) {
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);

    *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s\n%s",
                              expected, page_aligned_byte_count, prot, flags, fd.get(),
                              strerror(saved_errno), maps.c_str());
    return nullptr;
  }
  std::ostringstream check_map_request_error_msg;
  if (!CheckMapRequest(expected, actual, page_aligned_byte_count, &check_map_request_error_msg)) {
    *error_msg = check_map_request_error_msg.str();
    return nullptr;
  }
  return new MemMap(name, reinterpret_cast<byte*>(actual), byte_count, actual,
                    page_aligned_byte_count, prot);
}
325
// Maps 'byte_count' bytes of the file 'fd' starting at file offset 'start'
// (not necessarily page-aligned; mmap alignment is handled here). When
// 'reuse' is set, the caller guarantees it already reserved the pages at
// 'expected' and MAP_FIXED is used to overlay them. Returns nullptr and
// fills *error_msg on failure.
MemMap* MemMap::MapFileAtAddress(byte* expected, size_t byte_count, int prot, int flags, int fd,
                                 off_t start, bool reuse, const char* filename,
                                 std::string* error_msg) {
  CHECK_NE(0, prot);
  CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
  if (reuse) {
    // reuse means it is okay that it overlaps an existing page mapping.
    // Only use this if you actually made the page reservation yourself.
    CHECK(expected != nullptr);
    flags |= MAP_FIXED;
  } else {
    CHECK_EQ(0, flags & MAP_FIXED);
  }

  if (byte_count == 0) {
    return new MemMap(filename, nullptr, 0, nullptr, 0, prot);
  }
  // Adjust 'offset' to be page-aligned as required by mmap.
  int page_offset = start % kPageSize;
  off_t page_aligned_offset = start - page_offset;
  // Adjust 'byte_count' to be page-aligned as we will map this anyway.
  size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);
  // The 'expected' is modified (if specified, ie non-null) to be page aligned to the file but not
  // necessarily to virtual memory. mmap will page align 'expected' for us.
  byte* page_aligned_expected = (expected == nullptr) ? nullptr : (expected - page_offset);

  byte* actual = reinterpret_cast<byte*>(mmap(page_aligned_expected,
                                              page_aligned_byte_count,
                                              prot,
                                              flags,
                                              fd,
                                              page_aligned_offset));
  if (actual == MAP_FAILED) {
    // Capture errno before ReadFileToString can clobber it.
    auto saved_errno = errno;

    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);

    *error_msg = StringPrintf("mmap(%p, %zd, 0x%x, 0x%x, %d, %" PRId64
                              ") of file '%s' failed: %s\n%s",
                              page_aligned_expected, page_aligned_byte_count, prot, flags, fd,
                              static_cast<int64_t>(page_aligned_offset), filename,
                              strerror(saved_errno), maps.c_str());
    return nullptr;
  }
  std::ostringstream check_map_request_error_msg;
  if (!CheckMapRequest(expected, actual, page_aligned_byte_count, &check_map_request_error_msg)) {
    *error_msg = check_map_request_error_msg.str();
    return nullptr;
  }
  // begin_ points page_offset past the mapping base so callers see 'start'.
  return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
                    prot);
}
379
380MemMap::~MemMap() {
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700381 if (base_begin_ == nullptr && base_size_ == 0) {
Brian Carlstrom27ec9612011-09-19 20:20:38 -0700382 return;
383 }
Ian Rogers30fab402012-01-23 15:43:46 -0800384 int result = munmap(base_begin_, base_size_);
Brian Carlstrom27ec9612011-09-19 20:20:38 -0700385 if (result == -1) {
386 PLOG(FATAL) << "munmap failed";
387 }
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700388
389 // Remove it from maps_.
390 MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
391 bool found = false;
392 for (auto it = maps_.lower_bound(base_begin_), end = maps_.end();
393 it != end && it->first == base_begin_; ++it) {
394 if (it->second == this) {
395 found = true;
396 maps_.erase(it);
397 break;
398 }
399 }
400 CHECK(found) << "MemMap not found";
Brian Carlstrom27ec9612011-09-19 20:20:38 -0700401}
402
Mathieu Chartier1c23e1e2012-10-12 14:14:11 -0700403MemMap::MemMap(const std::string& name, byte* begin, size_t size, void* base_begin,
404 size_t base_size, int prot)
405 : name_(name), begin_(begin), size_(size), base_begin_(base_begin), base_size_(base_size),
406 prot_(prot) {
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700407 if (size_ == 0) {
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700408 CHECK(begin_ == nullptr);
409 CHECK(base_begin_ == nullptr);
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700410 CHECK_EQ(base_size_, 0U);
411 } else {
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700412 CHECK(begin_ != nullptr);
413 CHECK(base_begin_ != nullptr);
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700414 CHECK_NE(base_size_, 0U);
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700415
416 // Add it to maps_.
417 MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
418 maps_.insert(std::pair<void*, MemMap*>(base_begin_, this));
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700419 }
Brian Carlstrom27ec9612011-09-19 20:20:38 -0700420};
421
// Splits this map at new_end: this map shrinks to [begin_, new_end) and the
// tail [new_end, old end) is re-mapped as a fresh MemMap named 'tail_name'
// with protection 'tail_prot'. Returns the tail map, or nullptr with
// *error_msg set on failure.
MemMap* MemMap::RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
                           std::string* error_msg) {
  DCHECK_GE(new_end, Begin());
  DCHECK_LE(new_end, End());
  DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
  DCHECK(IsAligned<kPageSize>(begin_));
  DCHECK(IsAligned<kPageSize>(base_begin_));
  DCHECK(IsAligned<kPageSize>(reinterpret_cast<byte*>(base_begin_) + base_size_));
  DCHECK(IsAligned<kPageSize>(new_end));
  byte* old_end = begin_ + size_;
  byte* old_base_end = reinterpret_cast<byte*>(base_begin_) + base_size_;
  byte* new_base_end = new_end;
  DCHECK_LE(new_base_end, old_base_end);
  if (new_base_end == old_base_end) {
    // Nothing left over: return an empty tail map.
    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot);
  }
  // Shrink this map in place before carving out the tail.
  size_ = new_end - reinterpret_cast<byte*>(begin_);
  base_size_ = new_base_end - reinterpret_cast<byte*>(base_begin_);
  DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
  size_t tail_size = old_end - new_end;
  byte* tail_base_begin = new_base_end;
  size_t tail_base_size = old_base_end - new_base_end;
  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
  DCHECK(IsAligned<kPageSize>(tail_base_size));

#ifdef USE_ASHMEM
  // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
  // prefixed "dalvik-".
  std::string debug_friendly_name("dalvik-");
  debug_friendly_name += tail_name;
  ScopedFd fd(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
  int flags = MAP_PRIVATE | MAP_FIXED;
  if (fd.get() == -1) {
    *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                              tail_name, strerror(errno));
    return nullptr;
  }
#else
  ScopedFd fd(-1);
  // NOTE(review): unlike the ashmem path this does not pass MAP_FIXED, so the
  // kernel is not forced to reuse tail_base_begin — confirm this is intended.
  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#endif

  // Unmap/map the tail region.
  int result = munmap(tail_base_begin, tail_base_size);
  if (result == -1) {
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);
    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'\n%s",
                              tail_base_begin, tail_base_size, name_.c_str(),
                              maps.c_str());
    return nullptr;
  }
  // Don't cause memory allocation between the munmap and the mmap
  // calls. Otherwise, libc (or something else) might take this memory
  // region. Note this isn't perfect as there's no way to prevent
  // other threads to try to take this memory region here.
  byte* actual = reinterpret_cast<byte*>(mmap(tail_base_begin, tail_base_size, tail_prot,
                                              flags, fd.get(), 0));
  if (actual == MAP_FAILED) {
    std::string maps;
    ReadFileToString("/proc/self/maps", &maps);
    *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed\n%s",
                              tail_base_begin, tail_base_size, tail_prot, flags, fd.get(),
                              maps.c_str());
    return nullptr;
  }
  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot);
}
Logan Chiend88fa262012-06-06 15:23:32 +0800490
Ian Rogersc5f17732014-06-05 20:48:42 -0700491void MemMap::MadviseDontNeedAndZero() {
492 if (base_begin_ != nullptr || base_size_ != 0) {
493 if (!kMadviseZeroes) {
494 memset(base_begin_, 0, base_size_);
495 }
496 int result = madvise(base_begin_, base_size_, MADV_DONTNEED);
497 if (result == -1) {
498 PLOG(WARNING) << "madvise failed";
499 }
500 }
501}
502
Logan Chiend88fa262012-06-06 15:23:32 +0800503bool MemMap::Protect(int prot) {
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700504 if (base_begin_ == nullptr && base_size_ == 0) {
Ian Rogers1c849e52012-06-28 14:00:33 -0700505 prot_ = prot;
Logan Chiend88fa262012-06-06 15:23:32 +0800506 return true;
507 }
508
509 if (mprotect(base_begin_, base_size_, prot) == 0) {
Ian Rogers1c849e52012-06-28 14:00:33 -0700510 prot_ = prot;
Logan Chiend88fa262012-06-06 15:23:32 +0800511 return true;
512 }
513
Shih-wei Liaoa060ed92012-06-07 09:25:28 -0700514 PLOG(ERROR) << "mprotect(" << reinterpret_cast<void*>(base_begin_) << ", " << base_size_ << ", "
515 << prot << ") failed";
Logan Chiend88fa262012-06-06 15:23:32 +0800516 return false;
517}
518
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700519bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
520 MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
521 CHECK(begin_map != nullptr);
522 CHECK(end_map != nullptr);
523 CHECK(HasMemMap(begin_map));
524 CHECK(HasMemMap(end_map));
525 CHECK_LE(begin_map->BaseBegin(), end_map->BaseBegin());
526 MemMap* map = begin_map;
527 while (map->BaseBegin() != end_map->BaseBegin()) {
528 MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
529 if (next_map == nullptr) {
530 // Found a gap.
531 return false;
532 }
533 map = next_map;
534 }
535 return true;
536}
537
// Dumps all registered maps to os (locks via the two-argument overload).
void MemMap::DumpMaps(std::ostream& os) {
  DumpMaps(os, maps_);
}
541
// Dumps the given map table to os while holding mem_maps_lock_.
void MemMap::DumpMaps(std::ostream& os, const std::multimap<void*, MemMap*>& mem_maps) {
  MutexLock mu(Thread::Current(), *Locks::mem_maps_lock_);
  DumpMapsLocked(os, mem_maps);
}
546
// Dumps the given map table to os; caller must already hold mem_maps_lock_.
void MemMap::DumpMapsLocked(std::ostream& os, const std::multimap<void*, MemMap*>& mem_maps) {
  os << mem_maps;
}
550
551bool MemMap::HasMemMap(MemMap* map) {
552 void* base_begin = map->BaseBegin();
553 for (auto it = maps_.lower_bound(base_begin), end = maps_.end();
554 it != end && it->first == base_begin; ++it) {
555 if (it->second == map) {
556 return true;
557 }
558 }
559 return false;
560}
561
562MemMap* MemMap::GetLargestMemMapAt(void* address) {
563 size_t largest_size = 0;
564 MemMap* largest_map = nullptr;
565 for (auto it = maps_.lower_bound(address), end = maps_.end();
566 it != end && it->first == address; ++it) {
567 MemMap* map = it->second;
568 CHECK(map != nullptr);
569 if (largest_size < map->BaseSize()) {
570 largest_size = map->BaseSize();
571 largest_map = map;
572 }
573 }
574 return largest_map;
575}
576
// Prints a one-line summary of the mapping: base range, protection and name.
std::ostream& operator<<(std::ostream& os, const MemMap& mem_map) {
  os << StringPrintf("[MemMap: %p-%p prot=0x%x %s]",
                     mem_map.BaseBegin(), mem_map.BaseEnd(), mem_map.GetProtect(),
                     mem_map.GetName().c_str());
  return os;
}
583
Brian Carlstrom27ec9612011-09-19 20:20:38 -0700584} // namespace art