blob: 3adbf18a7a6acde7f93b2ca32f94592274aee9cd [file] [log] [blame]
Brian Carlstrom9004cb62013-07-26 15:48:31 -07001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "mem_map.h"
18
Andreas Gamped4901292017-05-30 18:41:34 -070019#include <sys/mman.h>
20
Ian Rogers700a4022014-05-19 16:49:03 -070021#include <memory>
Alex Lightca97ada2018-02-02 09:25:31 -080022#include <random>
Ian Rogers700a4022014-05-19 16:49:03 -070023
Evgenii Stepanov1e133742015-05-20 12:30:59 -070024#include "base/memory_tool.h"
Mathieu Chartier42bddce2015-11-09 15:16:56 -080025#include "base/unix_file/fd_file.h"
Andreas Gampe8cf9cb32017-07-19 09:28:38 -070026#include "common_runtime_test.h"
Brian Carlstrom9004cb62013-07-26 15:48:31 -070027
28namespace art {
29
Mathieu Chartier42bddce2015-11-09 15:16:56 -080030class MemMapTest : public CommonRuntimeTest {
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070031 public:
Ian Rogers13735952014-10-08 12:43:28 -070032 static uint8_t* BaseBegin(MemMap* mem_map) {
33 return reinterpret_cast<uint8_t*>(mem_map->base_begin_);
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070034 }
Mathieu Chartier16d29f82015-11-10 10:32:52 -080035
Ian Rogersef7d42f2014-01-06 12:55:46 -080036 static size_t BaseSize(MemMap* mem_map) {
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070037 return mem_map->base_size_;
38 }
Ian Rogersef7d42f2014-01-06 12:55:46 -080039
Alex Lightca97ada2018-02-02 09:25:31 -080040 static bool IsAddressMapped(void* addr) {
41 bool res = msync(addr, 1, MS_SYNC) == 0;
42 if (!res && errno != ENOMEM) {
43 PLOG(FATAL) << "Unexpected error occurred on msync";
44 }
45 return res;
46 }
47
48 static std::vector<uint8_t> RandomData(size_t size) {
49 std::random_device rd;
50 std::uniform_int_distribution<uint8_t> dist;
51 std::vector<uint8_t> res;
52 res.resize(size);
53 for (size_t i = 0; i < size; i++) {
54 res[i] = dist(rd);
55 }
56 return res;
57 }
58
Mathieu Chartier16d29f82015-11-10 10:32:52 -080059 static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
60 // Find a valid map address and unmap it before returning.
61 std::string error_msg;
62 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("temp",
63 nullptr,
64 size,
65 PROT_READ,
66 low_4gb,
67 false,
68 &error_msg));
69 CHECK(map != nullptr);
70 return map->Begin();
71 }
72
Ian Rogersef7d42f2014-01-06 12:55:46 -080073 static void RemapAtEndTest(bool low_4gb) {
74 std::string error_msg;
75 // Cast the page size to size_t.
76 const size_t page_size = static_cast<size_t>(kPageSize);
77 // Map a two-page memory region.
78 MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
79 nullptr,
80 2 * page_size,
81 PROT_READ | PROT_WRITE,
82 low_4gb,
Vladimir Marko5c42c292015-02-25 12:02:49 +000083 false,
Ian Rogersef7d42f2014-01-06 12:55:46 -080084 &error_msg);
85 // Check its state and write to it.
Ian Rogers13735952014-10-08 12:43:28 -070086 uint8_t* base0 = m0->Begin();
Ian Rogersef7d42f2014-01-06 12:55:46 -080087 ASSERT_TRUE(base0 != nullptr) << error_msg;
88 size_t size0 = m0->Size();
89 EXPECT_EQ(m0->Size(), 2 * page_size);
90 EXPECT_EQ(BaseBegin(m0), base0);
91 EXPECT_EQ(BaseSize(m0), size0);
92 memset(base0, 42, 2 * page_size);
93 // Remap the latter half into a second MemMap.
94 MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
95 "MemMapTest_RemapAtEndTest_map1",
96 PROT_READ | PROT_WRITE,
97 &error_msg);
98 // Check the states of the two maps.
99 EXPECT_EQ(m0->Begin(), base0) << error_msg;
100 EXPECT_EQ(m0->Size(), page_size);
101 EXPECT_EQ(BaseBegin(m0), base0);
102 EXPECT_EQ(BaseSize(m0), page_size);
Ian Rogers13735952014-10-08 12:43:28 -0700103 uint8_t* base1 = m1->Begin();
Ian Rogersef7d42f2014-01-06 12:55:46 -0800104 size_t size1 = m1->Size();
105 EXPECT_EQ(base1, base0 + page_size);
106 EXPECT_EQ(size1, page_size);
107 EXPECT_EQ(BaseBegin(m1), base1);
108 EXPECT_EQ(BaseSize(m1), size1);
109 // Write to the second region.
110 memset(base1, 43, page_size);
111 // Check the contents of the two regions.
112 for (size_t i = 0; i < page_size; ++i) {
113 EXPECT_EQ(base0[i], 42);
114 }
115 for (size_t i = 0; i < page_size; ++i) {
116 EXPECT_EQ(base1[i], 43);
117 }
118 // Unmap the first region.
119 delete m0;
120 // Make sure the second region is still accessible after the first
121 // region is unmapped.
122 for (size_t i = 0; i < page_size; ++i) {
123 EXPECT_EQ(base1[i], 43);
124 }
125 delete m1;
126 }
Andreas Gamped8f26db2014-05-19 17:01:13 -0700127
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700128 void CommonInit() {
129 MemMap::Init();
130 }
131
Andreas Gamped8f26db2014-05-19 17:01:13 -0700132#if defined(__LP64__) && !defined(__x86_64__)
133 static uintptr_t GetLinearScanPos() {
134 return MemMap::next_mem_pos_;
135 }
136#endif
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700137};
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700138
Andreas Gamped8f26db2014-05-19 17:01:13 -0700139#if defined(__LP64__) && !defined(__x86_64__)
140
141#ifdef __BIONIC__
142extern uintptr_t CreateStartPos(uint64_t input);
143#endif
144
// Checks the initial linear-scan position used for address assignment on
// 64-bit targets other than x86-64, and (on bionic) that CreateStartPos()
// produces varying start positions that stay below ART_BASE_ADDRESS.
TEST_F(MemMapTest, Start) {
  CommonInit();
  uintptr_t start = GetLinearScanPos();
  // The scan cursor must begin at least 64KB up and below ART_BASE_ADDRESS.
  EXPECT_LE(64 * KB, start);
  EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));
#ifdef __BIONIC__
  // Test a couple of values. Make sure they are different.
  uintptr_t last = 0;
  for (size_t i = 0; i < 100; ++i) {
    uintptr_t random_start = CreateStartPos(i * kPageSize);
    EXPECT_NE(last, random_start);
    last = random_start;
  }

  // Even on max, should be below ART_BASE_ADDRESS.
  EXPECT_LT(CreateStartPos(~0), static_cast<uintptr_t>(ART_BASE_ADDRESS));
#endif
  // End of test.
}
164#endif
165
Alex Lightca97ada2018-02-02 09:25:31 -0800166// We need mremap to be able to test ReplaceMapping at all
167#if HAVE_MREMAP_SYSCALL
// ReplaceWith() with equal-sized maps: the source's contents must move to the
// dest's address, the source must be unmapped, and the source pointer nulled.
TEST_F(MemMapTest, ReplaceMapping_SameSize) {
  std::string error_msg;
  std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                                    nullptr,
                                                    kPageSize,
                                                    PROT_READ,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(dest != nullptr);
  // Raw pointer: ReplaceWith() takes ownership and nulls it on success.
  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                        nullptr,
                                        kPageSize,
                                        PROT_WRITE | PROT_READ,
                                        false,
                                        false,
                                        &error_msg);
  ASSERT_TRUE(source != nullptr);
  void* source_addr = source->Begin();
  void* dest_addr = dest->Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));

  // Fill the source so we can verify the bytes arrive at dest.
  std::vector<uint8_t> data = RandomData(kPageSize);
  memcpy(source->Begin(), data.data(), data.size());

  ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;

  // Success: source's pages are gone, dest is still mapped, pointer nulled.
  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(source == nullptr);

  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));

  // The dest must now contain exactly the source's data.
  ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
}
204
// ReplaceWith() with a source larger than the dest: dest must grow to the
// source's size and take over its contents.
TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
  std::string error_msg;
  std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                                    nullptr,
                                                    5 * kPageSize,  // Need to make it larger
                                                                    // initially so we know
                                                                    // there won't be mappings
                                                                    // in the way when we move
                                                                    // source.
                                                    PROT_READ,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(dest != nullptr);
  // Raw pointer: ReplaceWith() takes ownership and nulls it on success.
  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                        nullptr,
                                        3 * kPageSize,
                                        PROT_WRITE | PROT_READ,
                                        false,
                                        false,
                                        &error_msg);
  ASSERT_TRUE(source != nullptr);
  uint8_t* source_addr = source->Begin();
  uint8_t* dest_addr = dest->Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));

  // Fill the source with random data.
  std::vector<uint8_t> data = RandomData(3 * kPageSize);
  memcpy(source->Begin(), data.data(), data.size());

  // Make the dest smaller so that we know we'll have space.
  dest->SetSize(kPageSize);

  // After shrinking, the page beyond the new size must be unmapped.
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));

  ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;

  // Success: dest grew to the source's 3 pages and holds the source's data.
  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_TRUE(source == nullptr);

  ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
}
252
// ReplaceWith() with a source smaller than the dest: dest must shrink to the
// source's size and release its extra pages.
TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
  std::string error_msg;
  std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                                    nullptr,
                                                    3 * kPageSize,
                                                    PROT_READ,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(dest != nullptr);
  // Raw pointer: ReplaceWith() takes ownership and nulls it on success.
  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                        nullptr,
                                        kPageSize,
                                        PROT_WRITE | PROT_READ,
                                        false,
                                        false,
                                        &error_msg);
  ASSERT_TRUE(source != nullptr);
  uint8_t* source_addr = source->Begin();
  uint8_t* dest_addr = dest->Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));

  std::vector<uint8_t> data = RandomData(kPageSize);
  memcpy(source->Begin(), data.data(), kPageSize);

  ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;

  // Success: dest shrank to one page, its trailing pages are unmapped, and it
  // holds the source's data.
  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_TRUE(source == nullptr);

  ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
}
291
// ReplaceWith() must fail when source and dest overlap, leaving both maps
// fully intact (pointer, mappings, sizes, and contents unchanged).
TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
  std::string error_msg;
  std::unique_ptr<MemMap> dest(
      MemMap::MapAnonymous(
          "MapAnonymousEmpty-atomic-replace-dest",
          nullptr,
          3 * kPageSize,  // Need to make it larger initially so we know there won't be mappings in
                          // the way when we move source.
          PROT_READ | PROT_WRITE,
          false,
          false,
          &error_msg));
  ASSERT_TRUE(dest != nullptr);
  // Resize down to 1 page so we can remap the rest.
  dest->SetSize(kPageSize);
  // Create source from the last 2 pages
  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                        dest->Begin() + kPageSize,
                                        2 * kPageSize,
                                        PROT_WRITE | PROT_READ,
                                        false,
                                        false,
                                        &error_msg);
  ASSERT_TRUE(source != nullptr);
  // Remember the original pointer so we can verify failure leaves it alone.
  MemMap* orig_source = source;
  ASSERT_EQ(dest->Begin() + kPageSize, source->Begin());
  uint8_t* source_addr = source->Begin();
  uint8_t* dest_addr = dest->Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));

  // Fill the source and dest with random data.
  std::vector<uint8_t> data = RandomData(2 * kPageSize);
  memcpy(source->Begin(), data.data(), data.size());
  std::vector<uint8_t> dest_data = RandomData(kPageSize);
  memcpy(dest->Begin(), dest_data.data(), dest_data.size());

  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));

  ASSERT_FALSE(dest->ReplaceWith(&source, &error_msg)) << error_msg;

  // Failure must leave everything as it was.
  ASSERT_TRUE(source == orig_source);
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_EQ(source->Size(), data.size());
  ASSERT_EQ(dest->Size(), dest_data.size());

  ASSERT_EQ(memcmp(source->Begin(), data.data(), data.size()), 0);
  ASSERT_EQ(memcmp(dest->Begin(), dest_data.data(), dest_data.size()), 0);

  // Ownership was not transferred on failure, so free the source manually.
  delete source;
}
344#endif // HAVE_MREMAP_SYSCALL
345
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700346TEST_F(MemMapTest, MapAnonymousEmpty) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700347 CommonInit();
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700348 std::string error_msg;
Ian Rogers700a4022014-05-19 16:49:03 -0700349 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
Vladimir Marko5c42c292015-02-25 12:02:49 +0000350 nullptr,
351 0,
352 PROT_READ,
353 false,
354 false,
355 &error_msg));
Ian Rogersef7d42f2014-01-06 12:55:46 -0800356 ASSERT_TRUE(map.get() != nullptr) << error_msg;
357 ASSERT_TRUE(error_msg.empty());
358 map.reset(MemMap::MapAnonymous("MapAnonymousEmpty",
359 nullptr,
360 kPageSize,
361 PROT_READ | PROT_WRITE,
362 false,
Vladimir Marko5c42c292015-02-25 12:02:49 +0000363 false,
Ian Rogersef7d42f2014-01-06 12:55:46 -0800364 &error_msg));
365 ASSERT_TRUE(map.get() != nullptr) << error_msg;
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700366 ASSERT_TRUE(error_msg.empty());
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700367}
368
Mathieu Chartier486932a2016-02-24 10:09:23 -0800369TEST_F(MemMapTest, MapAnonymousFailNullError) {
370 CommonInit();
371 // Test that we don't crash with a null error_str when mapping at an invalid location.
372 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousInvalid",
373 reinterpret_cast<uint8_t*>(kPageSize),
374 0x20000,
375 PROT_READ | PROT_WRITE,
376 false,
377 false,
378 nullptr));
379 ASSERT_EQ(nullptr, map.get());
380}
381
Ian Rogersef7d42f2014-01-06 12:55:46 -0800382#ifdef __LP64__
// On 64-bit targets, a low_4gb anonymous mapping must be placed below 2^32.
TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
                                                   nullptr,
                                                   kPageSize,
                                                   PROT_READ | PROT_WRITE,
                                                   true,
                                                   false,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  // The base address must lie below the 4GB boundary.
  ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
}
// MapFile() with low_4gb must place a file-backed mapping below 2^32.
TEST_F(MemMapTest, MapFile32Bit) {
  CommonInit();
  std::string error_msg;
  ScratchFile scratch_file;
  // Write one page of zeroed bytes so there is file content to map.
  constexpr size_t kMapSize = kPageSize;
  std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
  std::unique_ptr<MemMap> map(MemMap::MapFile(/*byte_count*/kMapSize,
                                              PROT_READ,
                                              MAP_PRIVATE,
                                              scratch_file.GetFd(),
                                              /*start*/0,
                                              /*low_4gb*/true,
                                              scratch_file.GetFilename().c_str(),
                                              &error_msg));
  ASSERT_TRUE(map != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map->Size(), kMapSize);
  // The base address must lie below the 4GB boundary.
  ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
}
Ian Rogersef7d42f2014-01-06 12:55:46 -0800417#endif
418
// MapAnonymous() with an explicit address: succeeds at a known-free address,
// succeeds with a null (don't-care) address, and fails when the requested
// address is already occupied.
TEST_F(MemMapTest, MapAnonymousExactAddr) {
  CommonInit();
  std::string error_msg;
  // Find a valid address.
  uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb*/false);
  // Map at an address that should work, which should succeed.
  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
                                                    valid_address,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  // The mapping must land exactly at the requested address.
  ASSERT_TRUE(map0->BaseBegin() == valid_address);
  // Map at an unspecified address, which should succeed.
  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
                                                    nullptr,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map1->BaseBegin() != nullptr);
  // Attempt to map at the same address, which should fail.
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
                                                    reinterpret_cast<uint8_t*>(map1->BaseBegin()),
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map2.get() == nullptr) << error_msg;
  ASSERT_TRUE(!error_msg.empty());
}
457
// Exercise RemapAtEnd() with an unrestricted (any-address) mapping.
TEST_F(MemMapTest, RemapAtEnd) {
  RemapAtEndTest(false);
}
461
462#ifdef __LP64__
// Exercise RemapAtEnd() with a low_4gb mapping (64-bit targets only).
TEST_F(MemMapTest, RemapAtEnd32bit) {
  RemapAtEndTest(true);
}
466#endif
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700467
// Probe explicit addresses upward from 2GB to obtain a low_4gb mapping in the
// high half of the 32-bit address space.
TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
  // Some MIPS32 hardware (namely the Creator Ci20 development board)
  // cannot allocate in the 2GB-4GB region.
  TEST_DISABLED_FOR_MIPS();

  CommonInit();
  // This test may not work under valgrind.
  if (RUNNING_ON_MEMORY_TOOL == 0) {
    constexpr size_t size = 0x100000;
    // Try all addresses starting from 2GB to 4GB.
    size_t start_addr = 2 * GB;
    std::string error_msg;
    std::unique_ptr<MemMap> map;
    for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
      map.reset(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
                                     reinterpret_cast<uint8_t*>(start_addr),
                                     size,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb*/true,
                                     false,
                                     &error_msg));
      if (map != nullptr) {
        break;
      }
    }
    ASSERT_TRUE(map.get() != nullptr) << error_msg;
    // Whichever address succeeded must end at or above the 2GB mark.
    ASSERT_GE(reinterpret_cast<uintptr_t>(map->End()), 2u * GB);
    ASSERT_TRUE(error_msg.empty());
    // And it must be exactly the address we asked for.
    ASSERT_EQ(BaseBegin(map.get()), reinterpret_cast<void*>(start_addr));
  }
}
499
500TEST_F(MemMapTest, MapAnonymousOverflow) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700501 CommonInit();
Qiming Shi84d49cc2014-04-24 15:38:41 +0800502 std::string error_msg;
503 uintptr_t ptr = 0;
504 ptr -= kPageSize; // Now it's close to the top.
Ian Rogers700a4022014-05-19 16:49:03 -0700505 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
Vladimir Marko5c42c292015-02-25 12:02:49 +0000506 reinterpret_cast<uint8_t*>(ptr),
507 2 * kPageSize, // brings it over the top.
508 PROT_READ | PROT_WRITE,
509 false,
510 false,
511 &error_msg));
Qiming Shi84d49cc2014-04-24 15:38:41 +0800512 ASSERT_EQ(nullptr, map.get());
513 ASSERT_FALSE(error_msg.empty());
514}
515
516#ifdef __LP64__
// A low_4gb request with an explicit address at the 4GB boundary is
// contradictory and must fail with an error message.
TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(
      MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
                           reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
                           kPageSize,
                           PROT_READ | PROT_WRITE,
                           true,
                           false,
                           &error_msg));
  ASSERT_EQ(nullptr, map.get());
  ASSERT_FALSE(error_msg.empty());
}
531
// A low_4gb request that starts below 4GB but whose range crosses the 4GB
// boundary must fail with an error message.
TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
                                                   reinterpret_cast<uint8_t*>(0xF0000000),
                                                   0x20000000,
                                                   PROT_READ | PROT_WRITE,
                                                   true,
                                                   false,
                                                   &error_msg));
  ASSERT_EQ(nullptr, map.get());
  ASSERT_FALSE(error_msg.empty());
}
545#endif
546
// Mapping on top of an existing mapping must succeed when the reuse flag
// (the sixth argument) is set.
TEST_F(MemMapTest, MapAnonymousReuse) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousReserve",
                                                   nullptr,
                                                   0x20000,
                                                   PROT_READ | PROT_WRITE,
                                                   false,
                                                   false,
                                                   &error_msg));
  ASSERT_NE(nullptr, map.get());
  ASSERT_TRUE(error_msg.empty());
  // Same base address as the map above, but with reuse = true: no conflict.
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymousReused",
                                                    reinterpret_cast<uint8_t*>(map->BaseBegin()),
                                                    0x10000,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    true,
                                                    &error_msg));
  ASSERT_NE(nullptr, map2.get());
  ASSERT_TRUE(error_msg.empty());
}
569
// CheckNoGaps() must report true for adjacent maps with no holes between
// them, and false once an intermediate map has been unmapped.
TEST_F(MemMapTest, CheckNoGaps) {
  CommonInit();
  std::string error_msg;
  constexpr size_t kNumPages = 3;
  // Map a 3-page mem map.
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymous0",
                                                   nullptr,
                                                   kPageSize * kNumPages,
                                                   PROT_READ | PROT_WRITE,
                                                   false,
                                                   false,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  // Record the base address.
  uint8_t* map_base = reinterpret_cast<uint8_t*>(map->BaseBegin());
  // Unmap it.
  map.reset();

  // Map at the same address, but in page-sized separate mem maps,
  // assuming the space at the address is still available.
  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
                                                    map_base,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
                                                    map_base + kPageSize,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
                                                    map_base + kPageSize * 2,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map2.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());

  // One-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map0.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map1.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map2.get(), map2.get()));

  // Two or three-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map1.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map2.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map2.get()));

  // Unmap the middle one.
  map1.reset();

  // Should return false now that there's a gap in the middle.
  ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
}
635
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800636TEST_F(MemMapTest, AlignBy) {
637 CommonInit();
638 std::string error_msg;
639 // Cast the page size to size_t.
640 const size_t page_size = static_cast<size_t>(kPageSize);
641 // Map a region.
642 std::unique_ptr<MemMap> m0(MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
643 nullptr,
644 14 * page_size,
645 PROT_READ | PROT_WRITE,
646 false,
647 false,
648 &error_msg));
649 uint8_t* base0 = m0->Begin();
650 ASSERT_TRUE(base0 != nullptr) << error_msg;
651 ASSERT_EQ(m0->Size(), 14 * page_size);
652 ASSERT_EQ(BaseBegin(m0.get()), base0);
653 ASSERT_EQ(BaseSize(m0.get()), m0->Size());
654
655 // Break it into several regions by using RemapAtEnd.
656 std::unique_ptr<MemMap> m1(m0->RemapAtEnd(base0 + 3 * page_size,
657 "MemMapTest_AlignByTest_map1",
658 PROT_READ | PROT_WRITE,
659 &error_msg));
660 uint8_t* base1 = m1->Begin();
661 ASSERT_TRUE(base1 != nullptr) << error_msg;
662 ASSERT_EQ(base1, base0 + 3 * page_size);
663 ASSERT_EQ(m0->Size(), 3 * page_size);
664
665 std::unique_ptr<MemMap> m2(m1->RemapAtEnd(base1 + 4 * page_size,
666 "MemMapTest_AlignByTest_map2",
667 PROT_READ | PROT_WRITE,
668 &error_msg));
669 uint8_t* base2 = m2->Begin();
670 ASSERT_TRUE(base2 != nullptr) << error_msg;
671 ASSERT_EQ(base2, base1 + 4 * page_size);
672 ASSERT_EQ(m1->Size(), 4 * page_size);
673
674 std::unique_ptr<MemMap> m3(m2->RemapAtEnd(base2 + 3 * page_size,
675 "MemMapTest_AlignByTest_map1",
676 PROT_READ | PROT_WRITE,
677 &error_msg));
678 uint8_t* base3 = m3->Begin();
679 ASSERT_TRUE(base3 != nullptr) << error_msg;
680 ASSERT_EQ(base3, base2 + 3 * page_size);
681 ASSERT_EQ(m2->Size(), 3 * page_size);
682 ASSERT_EQ(m3->Size(), 4 * page_size);
683
684 uint8_t* end0 = base0 + m0->Size();
685 uint8_t* end1 = base1 + m1->Size();
686 uint8_t* end2 = base2 + m2->Size();
687 uint8_t* end3 = base3 + m3->Size();
688
689 ASSERT_EQ(static_cast<size_t>(end3 - base0), 14 * page_size);
690
691 if (IsAlignedParam(base0, 2 * page_size)) {
692 ASSERT_FALSE(IsAlignedParam(base1, 2 * page_size));
693 ASSERT_FALSE(IsAlignedParam(base2, 2 * page_size));
694 ASSERT_TRUE(IsAlignedParam(base3, 2 * page_size));
695 ASSERT_TRUE(IsAlignedParam(end3, 2 * page_size));
696 } else {
697 ASSERT_TRUE(IsAlignedParam(base1, 2 * page_size));
698 ASSERT_TRUE(IsAlignedParam(base2, 2 * page_size));
699 ASSERT_FALSE(IsAlignedParam(base3, 2 * page_size));
700 ASSERT_FALSE(IsAlignedParam(end3, 2 * page_size));
701 }
702
703 // Align by 2 * page_size;
704 m0->AlignBy(2 * page_size);
705 m1->AlignBy(2 * page_size);
706 m2->AlignBy(2 * page_size);
707 m3->AlignBy(2 * page_size);
708
709 EXPECT_TRUE(IsAlignedParam(m0->Begin(), 2 * page_size));
710 EXPECT_TRUE(IsAlignedParam(m1->Begin(), 2 * page_size));
711 EXPECT_TRUE(IsAlignedParam(m2->Begin(), 2 * page_size));
712 EXPECT_TRUE(IsAlignedParam(m3->Begin(), 2 * page_size));
713
714 EXPECT_TRUE(IsAlignedParam(m0->Begin() + m0->Size(), 2 * page_size));
715 EXPECT_TRUE(IsAlignedParam(m1->Begin() + m1->Size(), 2 * page_size));
716 EXPECT_TRUE(IsAlignedParam(m2->Begin() + m2->Size(), 2 * page_size));
717 EXPECT_TRUE(IsAlignedParam(m3->Begin() + m3->Size(), 2 * page_size));
718
719 if (IsAlignedParam(base0, 2 * page_size)) {
720 EXPECT_EQ(m0->Begin(), base0);
721 EXPECT_EQ(m0->Begin() + m0->Size(), end0 - page_size);
722 EXPECT_EQ(m1->Begin(), base1 + page_size);
723 EXPECT_EQ(m1->Begin() + m1->Size(), end1 - page_size);
724 EXPECT_EQ(m2->Begin(), base2 + page_size);
725 EXPECT_EQ(m2->Begin() + m2->Size(), end2);
726 EXPECT_EQ(m3->Begin(), base3);
727 EXPECT_EQ(m3->Begin() + m3->Size(), end3);
728 } else {
729 EXPECT_EQ(m0->Begin(), base0 + page_size);
730 EXPECT_EQ(m0->Begin() + m0->Size(), end0);
731 EXPECT_EQ(m1->Begin(), base1);
732 EXPECT_EQ(m1->Begin() + m1->Size(), end1);
733 EXPECT_EQ(m2->Begin(), base2);
734 EXPECT_EQ(m2->Begin() + m2->Size(), end2 - page_size);
735 EXPECT_EQ(m3->Begin(), base3 + page_size);
736 EXPECT_EQ(m3->Begin() + m3->Size(), end3 - page_size);
737 }
738}
739
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700740} // namespace art