blob: 4a78bdcabece180c79fa83c2ebc997057b2fd144 [file] [log] [blame]
Brian Carlstrom9004cb62013-07-26 15:48:31 -07001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "mem_map.h"
18
Andreas Gamped4901292017-05-30 18:41:34 -070019#include <sys/mman.h>
20
Ian Rogers700a4022014-05-19 16:49:03 -070021#include <memory>
Alex Lightca97ada2018-02-02 09:25:31 -080022#include <random>
Ian Rogers700a4022014-05-19 16:49:03 -070023
David Sehrd5f8de82018-04-27 14:12:03 -070024#include "base/common_art_test.h"
25#include "common_runtime_test.h" // For TEST_DISABLED_FOR_MIPS
David Sehr1979c642018-04-26 14:41:18 -070026#include "memory_tool.h"
27#include "unix_file/fd_file.h"
Brian Carlstrom9004cb62013-07-26 15:48:31 -070028
29namespace art {
30
David Sehrd5f8de82018-04-27 14:12:03 -070031class MemMapTest : public CommonArtTest {
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070032 public:
Ian Rogers13735952014-10-08 12:43:28 -070033 static uint8_t* BaseBegin(MemMap* mem_map) {
34 return reinterpret_cast<uint8_t*>(mem_map->base_begin_);
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070035 }
Mathieu Chartier16d29f82015-11-10 10:32:52 -080036
Ian Rogersef7d42f2014-01-06 12:55:46 -080037 static size_t BaseSize(MemMap* mem_map) {
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070038 return mem_map->base_size_;
39 }
Ian Rogersef7d42f2014-01-06 12:55:46 -080040
Alex Lightca97ada2018-02-02 09:25:31 -080041 static bool IsAddressMapped(void* addr) {
42 bool res = msync(addr, 1, MS_SYNC) == 0;
43 if (!res && errno != ENOMEM) {
44 PLOG(FATAL) << "Unexpected error occurred on msync";
45 }
46 return res;
47 }
48
49 static std::vector<uint8_t> RandomData(size_t size) {
50 std::random_device rd;
51 std::uniform_int_distribution<uint8_t> dist;
52 std::vector<uint8_t> res;
53 res.resize(size);
54 for (size_t i = 0; i < size; i++) {
55 res[i] = dist(rd);
56 }
57 return res;
58 }
59
Mathieu Chartier16d29f82015-11-10 10:32:52 -080060 static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
61 // Find a valid map address and unmap it before returning.
62 std::string error_msg;
63 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("temp",
64 nullptr,
65 size,
66 PROT_READ,
67 low_4gb,
68 false,
69 &error_msg));
70 CHECK(map != nullptr);
71 return map->Begin();
72 }
73
Ian Rogersef7d42f2014-01-06 12:55:46 -080074 static void RemapAtEndTest(bool low_4gb) {
75 std::string error_msg;
76 // Cast the page size to size_t.
77 const size_t page_size = static_cast<size_t>(kPageSize);
78 // Map a two-page memory region.
79 MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
80 nullptr,
81 2 * page_size,
82 PROT_READ | PROT_WRITE,
83 low_4gb,
Vladimir Marko5c42c292015-02-25 12:02:49 +000084 false,
Ian Rogersef7d42f2014-01-06 12:55:46 -080085 &error_msg);
86 // Check its state and write to it.
Ian Rogers13735952014-10-08 12:43:28 -070087 uint8_t* base0 = m0->Begin();
Ian Rogersef7d42f2014-01-06 12:55:46 -080088 ASSERT_TRUE(base0 != nullptr) << error_msg;
89 size_t size0 = m0->Size();
90 EXPECT_EQ(m0->Size(), 2 * page_size);
91 EXPECT_EQ(BaseBegin(m0), base0);
92 EXPECT_EQ(BaseSize(m0), size0);
93 memset(base0, 42, 2 * page_size);
94 // Remap the latter half into a second MemMap.
95 MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
96 "MemMapTest_RemapAtEndTest_map1",
97 PROT_READ | PROT_WRITE,
98 &error_msg);
99 // Check the states of the two maps.
100 EXPECT_EQ(m0->Begin(), base0) << error_msg;
101 EXPECT_EQ(m0->Size(), page_size);
102 EXPECT_EQ(BaseBegin(m0), base0);
103 EXPECT_EQ(BaseSize(m0), page_size);
Ian Rogers13735952014-10-08 12:43:28 -0700104 uint8_t* base1 = m1->Begin();
Ian Rogersef7d42f2014-01-06 12:55:46 -0800105 size_t size1 = m1->Size();
106 EXPECT_EQ(base1, base0 + page_size);
107 EXPECT_EQ(size1, page_size);
108 EXPECT_EQ(BaseBegin(m1), base1);
109 EXPECT_EQ(BaseSize(m1), size1);
110 // Write to the second region.
111 memset(base1, 43, page_size);
112 // Check the contents of the two regions.
113 for (size_t i = 0; i < page_size; ++i) {
114 EXPECT_EQ(base0[i], 42);
115 }
116 for (size_t i = 0; i < page_size; ++i) {
117 EXPECT_EQ(base1[i], 43);
118 }
119 // Unmap the first region.
120 delete m0;
121 // Make sure the second region is still accessible after the first
122 // region is unmapped.
123 for (size_t i = 0; i < page_size; ++i) {
124 EXPECT_EQ(base1[i], 43);
125 }
126 delete m1;
127 }
Andreas Gamped8f26db2014-05-19 17:01:13 -0700128
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700129 void CommonInit() {
130 MemMap::Init();
131 }
132
Andreas Gamped8f26db2014-05-19 17:01:13 -0700133#if defined(__LP64__) && !defined(__x86_64__)
134 static uintptr_t GetLinearScanPos() {
135 return MemMap::next_mem_pos_;
136 }
137#endif
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700138};
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700139
#if defined(__LP64__) && !defined(__x86_64__)

#ifdef __BIONIC__
extern uintptr_t CreateStartPos(uint64_t input);
#endif

// Checks that the linear-scan start position lies between 64KB and
// ART_BASE_ADDRESS, and (on bionic) that CreateStartPos randomizes the
// start while staying below ART_BASE_ADDRESS.
TEST_F(MemMapTest, Start) {
  CommonInit();
  uintptr_t start = GetLinearScanPos();
  EXPECT_LE(64 * KB, start);
  EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));
#ifdef __BIONIC__
  // Test a couple of values. Make sure they are different.
  uintptr_t last = 0;
  for (size_t i = 0; i < 100; ++i) {
    uintptr_t random_start = CreateStartPos(i * kPageSize);
    EXPECT_NE(last, random_start);
    last = random_start;
  }

  // Even on max, should be below ART_BASE_ADDRESS.
  EXPECT_LT(CreateStartPos(~0), static_cast<uintptr_t>(ART_BASE_ADDRESS));
#endif
  // End of test.
}
#endif
166
// We need mremap to be able to test ReplaceMapping at all
#if HAVE_MREMAP_SYSCALL
// Replacing a mapping with an equal-sized source must move the source's
// contents to the dest address and unmap (and null out) the source.
// NOTE(review): unlike the other tests in this file, the ReplaceMapping
// tests do not call CommonInit() -- confirm MemMap::Init() is not needed
// on this path.
TEST_F(MemMapTest, ReplaceMapping_SameSize) {
  std::string error_msg;
  std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                                    nullptr,
                                                    kPageSize,
                                                    PROT_READ,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(dest != nullptr);
  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                        nullptr,
                                        kPageSize,
                                        PROT_WRITE | PROT_READ,
                                        false,
                                        false,
                                        &error_msg);
  ASSERT_TRUE(source != nullptr);
  void* source_addr = source->Begin();
  void* dest_addr = dest->Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));

  // Fill the source with random data so the move can be verified.
  std::vector<uint8_t> data = RandomData(kPageSize);
  memcpy(source->Begin(), data.data(), data.size());

  ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;

  // On success the source mapping is gone and the pointer is nulled out.
  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(source == nullptr);

  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));

  // The source's data must now live at the dest address.
  ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
}
205
// Replacing with a larger source must grow dest to the source's size and
// carry over all of the source's pages.
TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
  std::string error_msg;
  std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                                    nullptr,
                                                    5 * kPageSize,  // Need to make it larger
                                                                    // initially so we know
                                                                    // there won't be mappings
                                                                    // in the way when we move
                                                                    // source.
                                                    PROT_READ,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(dest != nullptr);
  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                        nullptr,
                                        3 * kPageSize,
                                        PROT_WRITE | PROT_READ,
                                        false,
                                        false,
                                        &error_msg);
  ASSERT_TRUE(source != nullptr);
  uint8_t* source_addr = source->Begin();
  uint8_t* dest_addr = dest->Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));

  // Fill the source with random data.
  std::vector<uint8_t> data = RandomData(3 * kPageSize);
  memcpy(source->Begin(), data.data(), data.size());

  // Make the dest smaller so that we know we'll have space.
  dest->SetSize(kPageSize);

  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));

  ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;

  // Dest should now cover all three pages of the source's data.
  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_TRUE(source == nullptr);

  ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
}
253
// Replacing with a smaller source must shrink dest to the source's size
// and unmap dest's now-unused tail pages.
TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
  std::string error_msg;
  std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                                    nullptr,
                                                    3 * kPageSize,
                                                    PROT_READ,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(dest != nullptr);
  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                        nullptr,
                                        kPageSize,
                                        PROT_WRITE | PROT_READ,
                                        false,
                                        false,
                                        &error_msg);
  ASSERT_TRUE(source != nullptr);
  uint8_t* source_addr = source->Begin();
  uint8_t* dest_addr = dest->Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));

  // Fill the source with random data so the move can be verified.
  std::vector<uint8_t> data = RandomData(kPageSize);
  memcpy(source->Begin(), data.data(), kPageSize);

  ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;

  // Dest is now one page; its former tail pages must be unmapped.
  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_TRUE(source == nullptr);

  ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
}
292
// When the source overlaps the dest, ReplaceWith must fail and leave both
// mappings, their sizes, and their contents untouched.
TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
  std::string error_msg;
  std::unique_ptr<MemMap> dest(
      MemMap::MapAnonymous(
          "MapAnonymousEmpty-atomic-replace-dest",
          nullptr,
          3 * kPageSize,  // Need to make it larger initially so we know there won't be mappings in
                          // the way when we move source.
          PROT_READ | PROT_WRITE,
          false,
          false,
          &error_msg));
  ASSERT_TRUE(dest != nullptr);
  // Resize down to 1 page so we can remap the rest.
  dest->SetSize(kPageSize);
  // Create source from the last 2 pages
  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                        dest->Begin() + kPageSize,
                                        2 * kPageSize,
                                        PROT_WRITE | PROT_READ,
                                        false,
                                        false,
                                        &error_msg);
  ASSERT_TRUE(source != nullptr);
  MemMap* orig_source = source;
  ASSERT_EQ(dest->Begin() + kPageSize, source->Begin());
  uint8_t* source_addr = source->Begin();
  uint8_t* dest_addr = dest->Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));

  // Fill the source and dest with random data.
  std::vector<uint8_t> data = RandomData(2 * kPageSize);
  memcpy(source->Begin(), data.data(), data.size());
  std::vector<uint8_t> dest_data = RandomData(kPageSize);
  memcpy(dest->Begin(), dest_data.data(), dest_data.size());

  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));

  // Overlapping replace must be rejected.
  ASSERT_FALSE(dest->ReplaceWith(&source, &error_msg)) << error_msg;

  // On failure the source pointer is left untouched and both mappings survive.
  ASSERT_TRUE(source == orig_source);
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_EQ(source->Size(), data.size());
  ASSERT_EQ(dest->Size(), dest_data.size());

  ASSERT_EQ(memcmp(source->Begin(), data.data(), data.size()), 0);
  ASSERT_EQ(memcmp(dest->Begin(), dest_data.data(), dest_data.size()), 0);

  // The failure path does not consume source; free it manually.
  delete source;
}
#endif  // HAVE_MREMAP_SYSCALL
346
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700347TEST_F(MemMapTest, MapAnonymousEmpty) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700348 CommonInit();
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700349 std::string error_msg;
Ian Rogers700a4022014-05-19 16:49:03 -0700350 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
Vladimir Marko5c42c292015-02-25 12:02:49 +0000351 nullptr,
352 0,
353 PROT_READ,
354 false,
355 false,
356 &error_msg));
Ian Rogersef7d42f2014-01-06 12:55:46 -0800357 ASSERT_TRUE(map.get() != nullptr) << error_msg;
358 ASSERT_TRUE(error_msg.empty());
359 map.reset(MemMap::MapAnonymous("MapAnonymousEmpty",
360 nullptr,
361 kPageSize,
362 PROT_READ | PROT_WRITE,
363 false,
Vladimir Marko5c42c292015-02-25 12:02:49 +0000364 false,
Ian Rogersef7d42f2014-01-06 12:55:46 -0800365 &error_msg));
366 ASSERT_TRUE(map.get() != nullptr) << error_msg;
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700367 ASSERT_TRUE(error_msg.empty());
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700368}
369
// Passing a null error_msg out-parameter must not crash when the mapping
// fails (here: a fixed address of one page, which is invalid).
TEST_F(MemMapTest, MapAnonymousFailNullError) {
  CommonInit();
  // Test that we don't crash with a null error_str when mapping at an invalid location.
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousInvalid",
                                                   reinterpret_cast<uint8_t*>(kPageSize),
                                                   0x20000,
                                                   PROT_READ | PROT_WRITE,
                                                   false,
                                                   false,
                                                   nullptr));
  ASSERT_EQ(nullptr, map.get());
}
382
#ifdef __LP64__
// With low_4gb set, an anonymous mapping must land below 4GB.
TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
                                                   nullptr,
                                                   kPageSize,
                                                   PROT_READ | PROT_WRITE,
                                                   true,
                                                   false,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
}
// MapFile with low_4gb must place the file mapping below 4GB.
TEST_F(MemMapTest, MapFile32Bit) {
  CommonInit();
  std::string error_msg;
  ScratchFile scratch_file;
  constexpr size_t kMapSize = kPageSize;
  // Write a page of zeros so the file is large enough to map.
  std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
  std::unique_ptr<MemMap> map(MemMap::MapFile(/*byte_count*/kMapSize,
                                              PROT_READ,
                                              MAP_PRIVATE,
                                              scratch_file.GetFd(),
                                              /*start*/0,
                                              /*low_4gb*/true,
                                              scratch_file.GetFilename().c_str(),
                                              &error_msg));
  ASSERT_TRUE(map != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map->Size(), kMapSize);
  ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
}
#endif
419
// Mapping at a known-free fixed address succeeds; mapping at an address
// that is already occupied fails with a non-empty error message.
TEST_F(MemMapTest, MapAnonymousExactAddr) {
  CommonInit();
  std::string error_msg;
  // Find a valid address.
  uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb*/false);
  // Map at an address that should work, which should succeed.
  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
                                                    valid_address,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map0->BaseBegin() == valid_address);
  // Map at an unspecified address, which should succeed.
  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
                                                    nullptr,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map1->BaseBegin() != nullptr);
  // Attempt to map at the same address, which should fail.
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
                                                    reinterpret_cast<uint8_t*>(map1->BaseBegin()),
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map2.get() == nullptr) << error_msg;
  ASSERT_TRUE(!error_msg.empty());
}
458
Ian Rogersef7d42f2014-01-06 12:55:46 -0800459TEST_F(MemMapTest, RemapAtEnd) {
460 RemapAtEndTest(false);
461}
462
#ifdef __LP64__
// Same scenario as RemapAtEnd, but constrained to the low 4GB.
TEST_F(MemMapTest, RemapAtEnd32bit) {
  RemapAtEndTest(true);
}
#endif
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700468
// Checks that a low_4gb mapping can be placed somewhere in the 2GB-4GB
// range, scanning upward until a fixed-address request succeeds.
TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
  // Some MIPS32 hardware (namely the Creator Ci20 development board)
  // cannot allocate in the 2GB-4GB region.
  TEST_DISABLED_FOR_MIPS();

  // This test may not work under Valgrind.
  // TODO: Valgrind is no longer supported, but Address Sanitizer is:
  // check whether this test works with ASan.
  TEST_DISABLED_FOR_MEMORY_TOOL();

  CommonInit();
  constexpr size_t size = 0x100000;
  // Try all addresses starting from 2GB to 4GB.
  size_t start_addr = 2 * GB;
  std::string error_msg;
  std::unique_ptr<MemMap> map;
  for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
    map.reset(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
                                   reinterpret_cast<uint8_t*>(start_addr),
                                   size,
                                   PROT_READ | PROT_WRITE,
                                   /*low_4gb*/true,
                                   false,
                                   &error_msg));
    if (map != nullptr) {
      break;
    }
  }
  // At least one address in the range must have been mappable.
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_GE(reinterpret_cast<uintptr_t>(map->End()), 2u * GB);
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(BaseBegin(map.get()), reinterpret_cast<void*>(start_addr));
}
502
503TEST_F(MemMapTest, MapAnonymousOverflow) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700504 CommonInit();
Qiming Shi84d49cc2014-04-24 15:38:41 +0800505 std::string error_msg;
506 uintptr_t ptr = 0;
507 ptr -= kPageSize; // Now it's close to the top.
Ian Rogers700a4022014-05-19 16:49:03 -0700508 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
Vladimir Marko5c42c292015-02-25 12:02:49 +0000509 reinterpret_cast<uint8_t*>(ptr),
510 2 * kPageSize, // brings it over the top.
511 PROT_READ | PROT_WRITE,
512 false,
513 false,
514 &error_msg));
Qiming Shi84d49cc2014-04-24 15:38:41 +0800515 ASSERT_EQ(nullptr, map.get());
516 ASSERT_FALSE(error_msg.empty());
517}
518
#ifdef __LP64__
// A low_4gb request with a fixed address at exactly 4GB must fail.
TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(
      MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
                           reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
                           kPageSize,
                           PROT_READ | PROT_WRITE,
                           true,
                           false,
                           &error_msg));
  ASSERT_EQ(nullptr, map.get());
  ASSERT_FALSE(error_msg.empty());
}
534
// A low_4gb request that starts below 4GB but whose end would cross the
// 4GB boundary must fail.
TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
                                                   reinterpret_cast<uint8_t*>(0xF0000000),
                                                   0x20000000,
                                                   PROT_READ | PROT_WRITE,
                                                   true,
                                                   false,
                                                   &error_msg));
  ASSERT_EQ(nullptr, map.get());
  ASSERT_FALSE(error_msg.empty());
}
#endif
549
// With reuse=true, mapping on top of an existing reservation at the same
// base address must succeed instead of being rejected as a collision.
TEST_F(MemMapTest, MapAnonymousReuse) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousReserve",
                                                   nullptr,
                                                   0x20000,
                                                   PROT_READ | PROT_WRITE,
                                                   false,
                                                   false,
                                                   &error_msg));
  ASSERT_NE(nullptr, map.get());
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymousReused",
                                                    reinterpret_cast<uint8_t*>(map->BaseBegin()),
                                                    0x10000,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    true,
                                                    &error_msg));
  ASSERT_NE(nullptr, map2.get());
  ASSERT_TRUE(error_msg.empty());
}
572
// CheckNoGaps should report a run of adjacent MemMaps as contiguous, and
// notice when one in the middle has been unmapped.
TEST_F(MemMapTest, CheckNoGaps) {
  CommonInit();
  std::string error_msg;
  constexpr size_t kNumPages = 3;
  // Map a 3-page mem map.
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymous0",
                                                   nullptr,
                                                   kPageSize * kNumPages,
                                                   PROT_READ | PROT_WRITE,
                                                   false,
                                                   false,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  // Record the base address.
  uint8_t* map_base = reinterpret_cast<uint8_t*>(map->BaseBegin());
  // Unmap it.
  map.reset();

  // Map at the same address, but in page-sized separate mem maps,
  // assuming the space at the address is still available.
  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
                                                    map_base,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
                                                    map_base + kPageSize,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
                                                    map_base + kPageSize * 2,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map2.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());

  // One-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map0.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map1.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map2.get(), map2.get()));

  // Two or three-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map1.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map2.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map2.get()));

  // Unmap the middle one.
  map1.reset();

  // Should return false now that there's a gap in the middle.
  ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
}
638
// Splits a 14-page mapping into four regions (3+4+3+4 pages), aligns each
// by two pages, and checks that every region's begin/end become 2-page
// aligned with the expected trimming on each side. The expectations branch
// on whether the original base happened to be 2-page aligned.
TEST_F(MemMapTest, AlignBy) {
  CommonInit();
  std::string error_msg;
  // Cast the page size to size_t.
  const size_t page_size = static_cast<size_t>(kPageSize);
  // Map a region.
  std::unique_ptr<MemMap> m0(MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
                                                  nullptr,
                                                  14 * page_size,
                                                  PROT_READ | PROT_WRITE,
                                                  false,
                                                  false,
                                                  &error_msg));
  uint8_t* base0 = m0->Begin();
  ASSERT_TRUE(base0 != nullptr) << error_msg;
  ASSERT_EQ(m0->Size(), 14 * page_size);
  ASSERT_EQ(BaseBegin(m0.get()), base0);
  ASSERT_EQ(BaseSize(m0.get()), m0->Size());

  // Break it into several regions by using RemapAtEnd.
  std::unique_ptr<MemMap> m1(m0->RemapAtEnd(base0 + 3 * page_size,
                                            "MemMapTest_AlignByTest_map1",
                                            PROT_READ | PROT_WRITE,
                                            &error_msg));
  uint8_t* base1 = m1->Begin();
  ASSERT_TRUE(base1 != nullptr) << error_msg;
  ASSERT_EQ(base1, base0 + 3 * page_size);
  ASSERT_EQ(m0->Size(), 3 * page_size);

  std::unique_ptr<MemMap> m2(m1->RemapAtEnd(base1 + 4 * page_size,
                                            "MemMapTest_AlignByTest_map2",
                                            PROT_READ | PROT_WRITE,
                                            &error_msg));
  uint8_t* base2 = m2->Begin();
  ASSERT_TRUE(base2 != nullptr) << error_msg;
  ASSERT_EQ(base2, base1 + 4 * page_size);
  ASSERT_EQ(m1->Size(), 4 * page_size);

  std::unique_ptr<MemMap> m3(m2->RemapAtEnd(base2 + 3 * page_size,
                                            "MemMapTest_AlignByTest_map1",
                                            PROT_READ | PROT_WRITE,
                                            &error_msg));
  uint8_t* base3 = m3->Begin();
  ASSERT_TRUE(base3 != nullptr) << error_msg;
  ASSERT_EQ(base3, base2 + 3 * page_size);
  ASSERT_EQ(m2->Size(), 3 * page_size);
  ASSERT_EQ(m3->Size(), 4 * page_size);

  uint8_t* end0 = base0 + m0->Size();
  uint8_t* end1 = base1 + m1->Size();
  uint8_t* end2 = base2 + m2->Size();
  uint8_t* end3 = base3 + m3->Size();

  // The four regions must cover the original 14 pages exactly.
  ASSERT_EQ(static_cast<size_t>(end3 - base0), 14 * page_size);

  // With odd page counts, the region boundaries alternate 2-page alignment
  // depending on the alignment of the original base.
  if (IsAlignedParam(base0, 2 * page_size)) {
    ASSERT_FALSE(IsAlignedParam(base1, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(base2, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(base3, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(end3, 2 * page_size));
  } else {
    ASSERT_TRUE(IsAlignedParam(base1, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(base2, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(base3, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(end3, 2 * page_size));
  }

  // Align by 2 * page_size.
  m0->AlignBy(2 * page_size);
  m1->AlignBy(2 * page_size);
  m2->AlignBy(2 * page_size);
  m3->AlignBy(2 * page_size);

  // After AlignBy, every region's begin and end are 2-page aligned.
  EXPECT_TRUE(IsAlignedParam(m0->Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m1->Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m2->Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m3->Begin(), 2 * page_size));

  EXPECT_TRUE(IsAlignedParam(m0->Begin() + m0->Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m1->Begin() + m1->Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m2->Begin() + m2->Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m3->Begin() + m3->Size(), 2 * page_size));

  // The exact trimming (which edge loses a page) depends on base0's alignment.
  if (IsAlignedParam(base0, 2 * page_size)) {
    EXPECT_EQ(m0->Begin(), base0);
    EXPECT_EQ(m0->Begin() + m0->Size(), end0 - page_size);
    EXPECT_EQ(m1->Begin(), base1 + page_size);
    EXPECT_EQ(m1->Begin() + m1->Size(), end1 - page_size);
    EXPECT_EQ(m2->Begin(), base2 + page_size);
    EXPECT_EQ(m2->Begin() + m2->Size(), end2);
    EXPECT_EQ(m3->Begin(), base3);
    EXPECT_EQ(m3->Begin() + m3->Size(), end3);
  } else {
    EXPECT_EQ(m0->Begin(), base0 + page_size);
    EXPECT_EQ(m0->Begin() + m0->Size(), end0);
    EXPECT_EQ(m1->Begin(), base1);
    EXPECT_EQ(m1->Begin() + m1->Size(), end1);
    EXPECT_EQ(m2->Begin(), base2);
    EXPECT_EQ(m2->Begin() + m2->Size(), end2 - page_size);
    EXPECT_EQ(m3->Begin(), base3 + page_size);
    EXPECT_EQ(m3->Begin() + m3->Size(), end3 - page_size);
  }
}
742
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700743} // namespace art