blob: d956126df103b878925f92debe186cb5bfa576b0 [file] [log] [blame]
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include <sys/mman.h>

#include <memory>
#include <random>

#include "base/common_art_test.h"
#include "common_runtime_test.h"  // For TEST_DISABLED_FOR_MIPS
#include "memory_tool.h"
#include "unix_file/fd_file.h"

29namespace art {
30
David Sehrd5f8de82018-04-27 14:12:03 -070031class MemMapTest : public CommonArtTest {
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070032 public:
Ian Rogers13735952014-10-08 12:43:28 -070033 static uint8_t* BaseBegin(MemMap* mem_map) {
34 return reinterpret_cast<uint8_t*>(mem_map->base_begin_);
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070035 }
Mathieu Chartier16d29f82015-11-10 10:32:52 -080036
Ian Rogersef7d42f2014-01-06 12:55:46 -080037 static size_t BaseSize(MemMap* mem_map) {
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070038 return mem_map->base_size_;
39 }
Ian Rogersef7d42f2014-01-06 12:55:46 -080040
Alex Lightca97ada2018-02-02 09:25:31 -080041 static bool IsAddressMapped(void* addr) {
42 bool res = msync(addr, 1, MS_SYNC) == 0;
43 if (!res && errno != ENOMEM) {
44 PLOG(FATAL) << "Unexpected error occurred on msync";
45 }
46 return res;
47 }
48
49 static std::vector<uint8_t> RandomData(size_t size) {
50 std::random_device rd;
51 std::uniform_int_distribution<uint8_t> dist;
52 std::vector<uint8_t> res;
53 res.resize(size);
54 for (size_t i = 0; i < size; i++) {
55 res[i] = dist(rd);
56 }
57 return res;
58 }
59
Mathieu Chartier16d29f82015-11-10 10:32:52 -080060 static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
61 // Find a valid map address and unmap it before returning.
62 std::string error_msg;
63 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("temp",
64 nullptr,
65 size,
66 PROT_READ,
67 low_4gb,
68 false,
69 &error_msg));
70 CHECK(map != nullptr);
71 return map->Begin();
72 }
73
Ian Rogersef7d42f2014-01-06 12:55:46 -080074 static void RemapAtEndTest(bool low_4gb) {
75 std::string error_msg;
76 // Cast the page size to size_t.
77 const size_t page_size = static_cast<size_t>(kPageSize);
78 // Map a two-page memory region.
79 MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
80 nullptr,
81 2 * page_size,
82 PROT_READ | PROT_WRITE,
83 low_4gb,
Vladimir Marko5c42c292015-02-25 12:02:49 +000084 false,
Ian Rogersef7d42f2014-01-06 12:55:46 -080085 &error_msg);
86 // Check its state and write to it.
Ian Rogers13735952014-10-08 12:43:28 -070087 uint8_t* base0 = m0->Begin();
Ian Rogersef7d42f2014-01-06 12:55:46 -080088 ASSERT_TRUE(base0 != nullptr) << error_msg;
89 size_t size0 = m0->Size();
90 EXPECT_EQ(m0->Size(), 2 * page_size);
91 EXPECT_EQ(BaseBegin(m0), base0);
92 EXPECT_EQ(BaseSize(m0), size0);
93 memset(base0, 42, 2 * page_size);
94 // Remap the latter half into a second MemMap.
95 MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
96 "MemMapTest_RemapAtEndTest_map1",
97 PROT_READ | PROT_WRITE,
98 &error_msg);
99 // Check the states of the two maps.
100 EXPECT_EQ(m0->Begin(), base0) << error_msg;
101 EXPECT_EQ(m0->Size(), page_size);
102 EXPECT_EQ(BaseBegin(m0), base0);
103 EXPECT_EQ(BaseSize(m0), page_size);
Ian Rogers13735952014-10-08 12:43:28 -0700104 uint8_t* base1 = m1->Begin();
Ian Rogersef7d42f2014-01-06 12:55:46 -0800105 size_t size1 = m1->Size();
106 EXPECT_EQ(base1, base0 + page_size);
107 EXPECT_EQ(size1, page_size);
108 EXPECT_EQ(BaseBegin(m1), base1);
109 EXPECT_EQ(BaseSize(m1), size1);
110 // Write to the second region.
111 memset(base1, 43, page_size);
112 // Check the contents of the two regions.
113 for (size_t i = 0; i < page_size; ++i) {
114 EXPECT_EQ(base0[i], 42);
115 }
116 for (size_t i = 0; i < page_size; ++i) {
117 EXPECT_EQ(base1[i], 43);
118 }
119 // Unmap the first region.
120 delete m0;
121 // Make sure the second region is still accessible after the first
122 // region is unmapped.
123 for (size_t i = 0; i < page_size; ++i) {
124 EXPECT_EQ(base1[i], 43);
125 }
126 delete m1;
127 }
Andreas Gamped8f26db2014-05-19 17:01:13 -0700128
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700129 void CommonInit() {
130 MemMap::Init();
131 }
132
Andreas Gamped8f26db2014-05-19 17:01:13 -0700133#if defined(__LP64__) && !defined(__x86_64__)
134 static uintptr_t GetLinearScanPos() {
135 return MemMap::next_mem_pos_;
136 }
137#endif
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700138};
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700139
#if defined(__LP64__) && !defined(__x86_64__)

#ifdef __BIONIC__
// Declared in mem_map.cc (bionic-only): derives a randomized scan start
// position from `input`.
extern uintptr_t CreateStartPos(uint64_t input);
#endif

// Checks the initial linear-scan position and (on bionic) the randomized
// start-position generator.
TEST_F(MemMapTest, Start) {
  CommonInit();
  uintptr_t start = GetLinearScanPos();
  // The scan cursor starts at or above 64 KB and below the ART base address.
  EXPECT_LE(64 * KB, start);
  EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));
#ifdef __BIONIC__
  // Test a couple of values. Make sure they are different.
  uintptr_t last = 0;
  for (size_t i = 0; i < 100; ++i) {
    uintptr_t random_start = CreateStartPos(i * kPageSize);
    EXPECT_NE(last, random_start);
    last = random_start;
  }

  // Even on max, should be below ART_BASE_ADDRESS.
  EXPECT_LT(CreateStartPos(~0), static_cast<uintptr_t>(ART_BASE_ADDRESS));
#endif
  // End of test.
}
#endif
166
Alex Lightca97ada2018-02-02 09:25:31 -0800167// We need mremap to be able to test ReplaceMapping at all
168#if HAVE_MREMAP_SYSCALL
// ReplaceWith with equal-sized source and dest: the source's contents must
// atomically replace dest's pages, and the source mapping must disappear.
TEST_F(MemMapTest, ReplaceMapping_SameSize) {
  std::string error_msg;
  std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                                    nullptr,
                                                    kPageSize,
                                                    PROT_READ,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(dest != nullptr);
  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                        nullptr,
                                        kPageSize,
                                        PROT_WRITE | PROT_READ,
                                        false,
                                        false,
                                        &error_msg);
  ASSERT_TRUE(source != nullptr);
  void* source_addr = source->Begin();
  void* dest_addr = dest->Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));

  // Fill the source with data we can recognize after the move.
  std::vector<uint8_t> data = RandomData(kPageSize);
  memcpy(source->Begin(), data.data(), data.size());

  ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;

  // The source pages were moved away; dest keeps its address. ReplaceWith
  // consumes the source MemMap and nulls the caller's pointer.
  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(source == nullptr);

  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));

  // Dest must now contain exactly the bytes written into the source.
  ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
}
205
// ReplaceWith where the source is larger than the (shrunk) dest: dest must
// grow to the source's size and take over its contents.
TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
  std::string error_msg;
  std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                                    nullptr,
                                                    5 * kPageSize,  // Need to make it larger
                                                                    // initially so we know
                                                                    // there won't be mappings
                                                                    // in the way when we move
                                                                    // source.
                                                    PROT_READ,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(dest != nullptr);
  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                        nullptr,
                                        3 * kPageSize,
                                        PROT_WRITE | PROT_READ,
                                        false,
                                        false,
                                        &error_msg);
  ASSERT_TRUE(source != nullptr);
  uint8_t* source_addr = source->Begin();
  uint8_t* dest_addr = dest->Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));

  // Fill the source with random data.
  std::vector<uint8_t> data = RandomData(3 * kPageSize);
  memcpy(source->Begin(), data.data(), data.size());

  // Make the dest smaller so that we know we'll have space.
  dest->SetSize(kPageSize);

  // Only the first page of dest is live after the shrink.
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));

  ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;

  // Dest grew to the source's 3 pages; source was consumed and nulled.
  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_TRUE(source == nullptr);

  ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
}
253
// ReplaceWith where the source is smaller than dest: dest must shrink to the
// source's size and its tail pages must be unmapped.
TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
  std::string error_msg;
  std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                                    nullptr,
                                                    3 * kPageSize,
                                                    PROT_READ,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(dest != nullptr);
  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                        nullptr,
                                        kPageSize,
                                        PROT_WRITE | PROT_READ,
                                        false,
                                        false,
                                        &error_msg);
  ASSERT_TRUE(source != nullptr);
  uint8_t* source_addr = source->Begin();
  uint8_t* dest_addr = dest->Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));

  // One page of recognizable data in the source.
  std::vector<uint8_t> data = RandomData(kPageSize);
  memcpy(source->Begin(), data.data(), kPageSize);

  ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;

  // Dest shrank to one page; its former tail is unmapped; source was
  // consumed and nulled.
  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_TRUE(source == nullptr);

  ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
}
292
// ReplaceWith must fail cleanly when source overlaps dest's reserved range,
// leaving both mappings and their contents untouched.
TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
  std::string error_msg;
  std::unique_ptr<MemMap> dest(
      MemMap::MapAnonymous(
          "MapAnonymousEmpty-atomic-replace-dest",
          nullptr,
          3 * kPageSize,  // Need to make it larger initially so we know there won't be mappings in
                          // the way when we move source.
          PROT_READ | PROT_WRITE,
          false,
          false,
          &error_msg));
  ASSERT_TRUE(dest != nullptr);
  // Resize down to 1 page so we can remap the rest.
  dest->SetSize(kPageSize);
  // Create source from the last 2 pages
  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                        dest->Begin() + kPageSize,
                                        2 * kPageSize,
                                        PROT_WRITE | PROT_READ,
                                        false,
                                        false,
                                        &error_msg);
  ASSERT_TRUE(source != nullptr);
  // Keep the original pointer so we can verify ReplaceWith left it alone.
  MemMap* orig_source = source;
  ASSERT_EQ(dest->Begin() + kPageSize, source->Begin());
  uint8_t* source_addr = source->Begin();
  uint8_t* dest_addr = dest->Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));

  // Fill the source and dest with random data.
  std::vector<uint8_t> data = RandomData(2 * kPageSize);
  memcpy(source->Begin(), data.data(), data.size());
  std::vector<uint8_t> dest_data = RandomData(kPageSize);
  memcpy(dest->Begin(), dest_data.data(), dest_data.size());

  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));

  // The overlapping replace must be rejected.
  ASSERT_FALSE(dest->ReplaceWith(&source, &error_msg)) << error_msg;

  // On failure, both mappings must be fully intact: same pointer, same
  // addresses, same sizes, same contents.
  ASSERT_TRUE(source == orig_source);
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_EQ(source->Size(), data.size());
  ASSERT_EQ(dest->Size(), dest_data.size());

  ASSERT_EQ(memcmp(source->Begin(), data.data(), data.size()), 0);
  ASSERT_EQ(memcmp(dest->Begin(), dest_data.data(), dest_data.size()), 0);

  // source was not consumed on failure, so it must be freed manually here.
  delete source;
}
345#endif // HAVE_MREMAP_SYSCALL
346
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700347TEST_F(MemMapTest, MapAnonymousEmpty) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700348 CommonInit();
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700349 std::string error_msg;
Ian Rogers700a4022014-05-19 16:49:03 -0700350 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
Vladimir Marko5c42c292015-02-25 12:02:49 +0000351 nullptr,
352 0,
353 PROT_READ,
354 false,
355 false,
356 &error_msg));
Ian Rogersef7d42f2014-01-06 12:55:46 -0800357 ASSERT_TRUE(map.get() != nullptr) << error_msg;
358 ASSERT_TRUE(error_msg.empty());
359 map.reset(MemMap::MapAnonymous("MapAnonymousEmpty",
360 nullptr,
361 kPageSize,
362 PROT_READ | PROT_WRITE,
363 false,
Vladimir Marko5c42c292015-02-25 12:02:49 +0000364 false,
Ian Rogersef7d42f2014-01-06 12:55:46 -0800365 &error_msg));
366 ASSERT_TRUE(map.get() != nullptr) << error_msg;
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700367 ASSERT_TRUE(error_msg.empty());
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700368}
369
Mathieu Chartier486932a2016-02-24 10:09:23 -0800370TEST_F(MemMapTest, MapAnonymousFailNullError) {
371 CommonInit();
372 // Test that we don't crash with a null error_str when mapping at an invalid location.
373 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousInvalid",
374 reinterpret_cast<uint8_t*>(kPageSize),
375 0x20000,
376 PROT_READ | PROT_WRITE,
377 false,
378 false,
379 nullptr));
380 ASSERT_EQ(nullptr, map.get());
381}
382
Ian Rogersef7d42f2014-01-06 12:55:46 -0800383#ifdef __LP64__
384TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700385 CommonInit();
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700386 std::string error_msg;
Ian Rogers700a4022014-05-19 16:49:03 -0700387 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
Vladimir Marko5c42c292015-02-25 12:02:49 +0000388 nullptr,
389 kPageSize,
390 PROT_READ | PROT_WRITE,
391 true,
392 false,
393 &error_msg));
Ian Rogersef7d42f2014-01-06 12:55:46 -0800394 ASSERT_TRUE(map.get() != nullptr) << error_msg;
395 ASSERT_TRUE(error_msg.empty());
396 ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700397}
Mathieu Chartier42bddce2015-11-09 15:16:56 -0800398TEST_F(MemMapTest, MapFile32Bit) {
399 CommonInit();
400 std::string error_msg;
401 ScratchFile scratch_file;
402 constexpr size_t kMapSize = kPageSize;
403 std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
404 ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
405 std::unique_ptr<MemMap> map(MemMap::MapFile(/*byte_count*/kMapSize,
406 PROT_READ,
407 MAP_PRIVATE,
408 scratch_file.GetFd(),
409 /*start*/0,
410 /*low_4gb*/true,
411 scratch_file.GetFilename().c_str(),
412 &error_msg));
413 ASSERT_TRUE(map != nullptr) << error_msg;
414 ASSERT_TRUE(error_msg.empty());
415 ASSERT_EQ(map->Size(), kMapSize);
416 ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
417}
Ian Rogersef7d42f2014-01-06 12:55:46 -0800418#endif
419
// Exact-address (non-reuse) mapping: succeeds at a known-free address,
// succeeds with no address hint, and fails on an already-occupied address.
TEST_F(MemMapTest, MapAnonymousExactAddr) {
  CommonInit();
  std::string error_msg;
  // Find a valid address.
  uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb*/false);
  // Map at an address that should work, which should succeed.
  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
                                                    valid_address,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map0->BaseBegin() == valid_address);
  // Map at an unspecified address, which should succeed.
  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
                                                    nullptr,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map1->BaseBegin() != nullptr);
  // Attempt to map at the same address, which should fail because the
  // address is occupied by map1 and reuse was not requested.
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
                                                    reinterpret_cast<uint8_t*>(map1->BaseBegin()),
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map2.get() == nullptr) << error_msg;
  ASSERT_TRUE(!error_msg.empty());
}
458
// Exercise RemapAtEnd with a normal (unconstrained) mapping.
TEST_F(MemMapTest, RemapAtEnd) {
  RemapAtEndTest(false);
}

#ifdef __LP64__
// On 64-bit targets, repeat the test with the mapping forced below 4GB.
TEST_F(MemMapTest, RemapAtEnd32bit) {
  RemapAtEndTest(true);
}
#endif
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700468
// Verifies that low_4gb mappings can be placed in the upper half (2GB-4GB)
// of the 32-bit range, by probing candidate addresses until one succeeds.
TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
  // Some MIPS32 hardware (namely the Creator Ci20 development board)
  // cannot allocate in the 2GB-4GB region.
  TEST_DISABLED_FOR_MIPS();

  CommonInit();
  // This test may not work under valgrind.
  if (RUNNING_ON_MEMORY_TOOL == 0) {
    constexpr size_t size = 0x100000;
    // Try all addresses starting from 2GB to 4GB.
    size_t start_addr = 2 * GB;
    std::string error_msg;
    std::unique_ptr<MemMap> map;
    for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
      map.reset(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
                                     reinterpret_cast<uint8_t*>(start_addr),
                                     size,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb*/true,
                                     false,
                                     &error_msg));
      if (map != nullptr) {
        break;
      }
    }
    // At least one address in [2GB, 4GB) must have been mappable.
    ASSERT_TRUE(map.get() != nullptr) << error_msg;
    ASSERT_GE(reinterpret_cast<uintptr_t>(map->End()), 2u * GB);
    ASSERT_TRUE(error_msg.empty());
    ASSERT_EQ(BaseBegin(map.get()), reinterpret_cast<void*>(start_addr));
  }
}
500
501TEST_F(MemMapTest, MapAnonymousOverflow) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700502 CommonInit();
Qiming Shi84d49cc2014-04-24 15:38:41 +0800503 std::string error_msg;
504 uintptr_t ptr = 0;
505 ptr -= kPageSize; // Now it's close to the top.
Ian Rogers700a4022014-05-19 16:49:03 -0700506 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
Vladimir Marko5c42c292015-02-25 12:02:49 +0000507 reinterpret_cast<uint8_t*>(ptr),
508 2 * kPageSize, // brings it over the top.
509 PROT_READ | PROT_WRITE,
510 false,
511 false,
512 &error_msg));
Qiming Shi84d49cc2014-04-24 15:38:41 +0800513 ASSERT_EQ(nullptr, map.get());
514 ASSERT_FALSE(error_msg.empty());
515}
516
517#ifdef __LP64__
518TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700519 CommonInit();
Qiming Shi84d49cc2014-04-24 15:38:41 +0800520 std::string error_msg;
Vladimir Marko5c42c292015-02-25 12:02:49 +0000521 std::unique_ptr<MemMap> map(
522 MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
523 reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
524 kPageSize,
525 PROT_READ | PROT_WRITE,
526 true,
527 false,
528 &error_msg));
Qiming Shi84d49cc2014-04-24 15:38:41 +0800529 ASSERT_EQ(nullptr, map.get());
530 ASSERT_FALSE(error_msg.empty());
531}
532
533TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700534 CommonInit();
Qiming Shi84d49cc2014-04-24 15:38:41 +0800535 std::string error_msg;
Ian Rogers700a4022014-05-19 16:49:03 -0700536 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
Vladimir Marko5c42c292015-02-25 12:02:49 +0000537 reinterpret_cast<uint8_t*>(0xF0000000),
538 0x20000000,
539 PROT_READ | PROT_WRITE,
540 true,
541 false,
542 &error_msg));
Qiming Shi84d49cc2014-04-24 15:38:41 +0800543 ASSERT_EQ(nullptr, map.get());
544 ASSERT_FALSE(error_msg.empty());
545}
546#endif
547
Vladimir Marko5c42c292015-02-25 12:02:49 +0000548TEST_F(MemMapTest, MapAnonymousReuse) {
549 CommonInit();
550 std::string error_msg;
551 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousReserve",
552 nullptr,
553 0x20000,
554 PROT_READ | PROT_WRITE,
555 false,
556 false,
557 &error_msg));
558 ASSERT_NE(nullptr, map.get());
559 ASSERT_TRUE(error_msg.empty());
560 std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymousReused",
561 reinterpret_cast<uint8_t*>(map->BaseBegin()),
562 0x10000,
563 PROT_READ | PROT_WRITE,
564 false,
565 true,
566 &error_msg));
567 ASSERT_NE(nullptr, map2.get());
568 ASSERT_TRUE(error_msg.empty());
569}
570
// CheckNoGaps must report true only for a contiguous run of mappings; a
// hole between the first and last map must make it return false.
TEST_F(MemMapTest, CheckNoGaps) {
  CommonInit();
  std::string error_msg;
  constexpr size_t kNumPages = 3;
  // Map a 3-page mem map.
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymous0",
                                                   nullptr,
                                                   kPageSize * kNumPages,
                                                   PROT_READ | PROT_WRITE,
                                                   false,
                                                   false,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  // Record the base address.
  uint8_t* map_base = reinterpret_cast<uint8_t*>(map->BaseBegin());
  // Unmap it.
  map.reset();

  // Map at the same address, but in page-sized separate mem maps,
  // assuming the space at the address is still available.
  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
                                                    map_base,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
                                                    map_base + kPageSize,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
                                                    map_base + kPageSize * 2,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map2.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());

  // One-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map0.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map1.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map2.get(), map2.get()));

  // Two or three-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map1.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map2.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map2.get()));

  // Unmap the middle one.
  map1.reset();

  // Should return false now that there's a gap in the middle.
  ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
}
636
// AlignBy must trim each map so that both its begin and end are aligned to
// the requested (2-page) boundary, shrinking by at most one page per side.
// A 14-page region is split 3/4/3/4 so the pieces have mixed parities.
TEST_F(MemMapTest, AlignBy) {
  CommonInit();
  std::string error_msg;
  // Cast the page size to size_t.
  const size_t page_size = static_cast<size_t>(kPageSize);
  // Map a region.
  std::unique_ptr<MemMap> m0(MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
                                                  nullptr,
                                                  14 * page_size,
                                                  PROT_READ | PROT_WRITE,
                                                  false,
                                                  false,
                                                  &error_msg));
  uint8_t* base0 = m0->Begin();
  ASSERT_TRUE(base0 != nullptr) << error_msg;
  ASSERT_EQ(m0->Size(), 14 * page_size);
  ASSERT_EQ(BaseBegin(m0.get()), base0);
  ASSERT_EQ(BaseSize(m0.get()), m0->Size());

  // Break it into several regions by using RemapAtEnd.
  std::unique_ptr<MemMap> m1(m0->RemapAtEnd(base0 + 3 * page_size,
                                            "MemMapTest_AlignByTest_map1",
                                            PROT_READ | PROT_WRITE,
                                            &error_msg));
  uint8_t* base1 = m1->Begin();
  ASSERT_TRUE(base1 != nullptr) << error_msg;
  ASSERT_EQ(base1, base0 + 3 * page_size);
  ASSERT_EQ(m0->Size(), 3 * page_size);

  std::unique_ptr<MemMap> m2(m1->RemapAtEnd(base1 + 4 * page_size,
                                            "MemMapTest_AlignByTest_map2",
                                            PROT_READ | PROT_WRITE,
                                            &error_msg));
  uint8_t* base2 = m2->Begin();
  ASSERT_TRUE(base2 != nullptr) << error_msg;
  ASSERT_EQ(base2, base1 + 4 * page_size);
  ASSERT_EQ(m1->Size(), 4 * page_size);

  // NOTE(review): the name below repeats "map1" — presumably a copy-paste
  // slip for "map3"; the name is cosmetic and does not affect the test.
  std::unique_ptr<MemMap> m3(m2->RemapAtEnd(base2 + 3 * page_size,
                                            "MemMapTest_AlignByTest_map1",
                                            PROT_READ | PROT_WRITE,
                                            &error_msg));
  uint8_t* base3 = m3->Begin();
  ASSERT_TRUE(base3 != nullptr) << error_msg;
  ASSERT_EQ(base3, base2 + 3 * page_size);
  ASSERT_EQ(m2->Size(), 3 * page_size);
  ASSERT_EQ(m3->Size(), 4 * page_size);

  uint8_t* end0 = base0 + m0->Size();
  uint8_t* end1 = base1 + m1->Size();
  uint8_t* end2 = base2 + m2->Size();
  uint8_t* end3 = base3 + m3->Size();

  ASSERT_EQ(static_cast<size_t>(end3 - base0), 14 * page_size);

  // The 3-page splits flip alignment parity at each boundary, so which
  // bases are 2-page aligned depends only on base0's parity.
  if (IsAlignedParam(base0, 2 * page_size)) {
    ASSERT_FALSE(IsAlignedParam(base1, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(base2, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(base3, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(end3, 2 * page_size));
  } else {
    ASSERT_TRUE(IsAlignedParam(base1, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(base2, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(base3, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(end3, 2 * page_size));
  }

  // Align by 2 * page_size;
  m0->AlignBy(2 * page_size);
  m1->AlignBy(2 * page_size);
  m2->AlignBy(2 * page_size);
  m3->AlignBy(2 * page_size);

  // After AlignBy, every begin and end must be 2-page aligned.
  EXPECT_TRUE(IsAlignedParam(m0->Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m1->Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m2->Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m3->Begin(), 2 * page_size));

  EXPECT_TRUE(IsAlignedParam(m0->Begin() + m0->Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m1->Begin() + m1->Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m2->Begin() + m2->Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m3->Begin() + m3->Size(), 2 * page_size));

  // Exactly one page is trimmed from each unaligned side; which side
  // depends on base0's original parity.
  if (IsAlignedParam(base0, 2 * page_size)) {
    EXPECT_EQ(m0->Begin(), base0);
    EXPECT_EQ(m0->Begin() + m0->Size(), end0 - page_size);
    EXPECT_EQ(m1->Begin(), base1 + page_size);
    EXPECT_EQ(m1->Begin() + m1->Size(), end1 - page_size);
    EXPECT_EQ(m2->Begin(), base2 + page_size);
    EXPECT_EQ(m2->Begin() + m2->Size(), end2);
    EXPECT_EQ(m3->Begin(), base3);
    EXPECT_EQ(m3->Begin() + m3->Size(), end3);
  } else {
    EXPECT_EQ(m0->Begin(), base0 + page_size);
    EXPECT_EQ(m0->Begin() + m0->Size(), end0);
    EXPECT_EQ(m1->Begin(), base1);
    EXPECT_EQ(m1->Begin() + m1->Size(), end1);
    EXPECT_EQ(m2->Begin(), base2);
    EXPECT_EQ(m2->Begin() + m2->Size(), end2 - page_size);
    EXPECT_EQ(m3->Begin(), base3 + page_size);
    EXPECT_EQ(m3->Begin() + m3->Size(), end3 - page_size);
  }
}
740
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700741} // namespace art