blob: 8d6bb3883801a8ac0f1b8814d0e6bbec2ff9ee0f [file] [log] [blame]
Brian Carlstrom9004cb62013-07-26 15:48:31 -07001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
#include "mem_map.h"

#include <sys/mman.h>

#include <cstdint>
#include <limits>
#include <memory>
#include <string>

#include "base/memory_tool.h"
#include "base/unix_file/fd_file.h"
#include "common_runtime_test.h"
Brian Carlstrom9004cb62013-07-26 15:48:31 -070026
27namespace art {
28
Mathieu Chartier42bddce2015-11-09 15:16:56 -080029class MemMapTest : public CommonRuntimeTest {
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070030 public:
Ian Rogers13735952014-10-08 12:43:28 -070031 static uint8_t* BaseBegin(MemMap* mem_map) {
32 return reinterpret_cast<uint8_t*>(mem_map->base_begin_);
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070033 }
Mathieu Chartier16d29f82015-11-10 10:32:52 -080034
Ian Rogersef7d42f2014-01-06 12:55:46 -080035 static size_t BaseSize(MemMap* mem_map) {
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070036 return mem_map->base_size_;
37 }
Ian Rogersef7d42f2014-01-06 12:55:46 -080038
Mathieu Chartier16d29f82015-11-10 10:32:52 -080039 static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
40 // Find a valid map address and unmap it before returning.
41 std::string error_msg;
42 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("temp",
43 nullptr,
44 size,
45 PROT_READ,
46 low_4gb,
47 false,
48 &error_msg));
49 CHECK(map != nullptr);
50 return map->Begin();
51 }
52
Ian Rogersef7d42f2014-01-06 12:55:46 -080053 static void RemapAtEndTest(bool low_4gb) {
54 std::string error_msg;
55 // Cast the page size to size_t.
56 const size_t page_size = static_cast<size_t>(kPageSize);
57 // Map a two-page memory region.
58 MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
59 nullptr,
60 2 * page_size,
61 PROT_READ | PROT_WRITE,
62 low_4gb,
Vladimir Marko5c42c292015-02-25 12:02:49 +000063 false,
Ian Rogersef7d42f2014-01-06 12:55:46 -080064 &error_msg);
65 // Check its state and write to it.
Ian Rogers13735952014-10-08 12:43:28 -070066 uint8_t* base0 = m0->Begin();
Ian Rogersef7d42f2014-01-06 12:55:46 -080067 ASSERT_TRUE(base0 != nullptr) << error_msg;
68 size_t size0 = m0->Size();
69 EXPECT_EQ(m0->Size(), 2 * page_size);
70 EXPECT_EQ(BaseBegin(m0), base0);
71 EXPECT_EQ(BaseSize(m0), size0);
72 memset(base0, 42, 2 * page_size);
73 // Remap the latter half into a second MemMap.
74 MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
75 "MemMapTest_RemapAtEndTest_map1",
76 PROT_READ | PROT_WRITE,
David Sehrd1dbb742017-07-17 11:20:38 -070077 MAP_PRIVATE,
Ian Rogersef7d42f2014-01-06 12:55:46 -080078 &error_msg);
79 // Check the states of the two maps.
80 EXPECT_EQ(m0->Begin(), base0) << error_msg;
81 EXPECT_EQ(m0->Size(), page_size);
82 EXPECT_EQ(BaseBegin(m0), base0);
83 EXPECT_EQ(BaseSize(m0), page_size);
Ian Rogers13735952014-10-08 12:43:28 -070084 uint8_t* base1 = m1->Begin();
Ian Rogersef7d42f2014-01-06 12:55:46 -080085 size_t size1 = m1->Size();
86 EXPECT_EQ(base1, base0 + page_size);
87 EXPECT_EQ(size1, page_size);
88 EXPECT_EQ(BaseBegin(m1), base1);
89 EXPECT_EQ(BaseSize(m1), size1);
90 // Write to the second region.
91 memset(base1, 43, page_size);
92 // Check the contents of the two regions.
93 for (size_t i = 0; i < page_size; ++i) {
94 EXPECT_EQ(base0[i], 42);
95 }
96 for (size_t i = 0; i < page_size; ++i) {
97 EXPECT_EQ(base1[i], 43);
98 }
99 // Unmap the first region.
100 delete m0;
101 // Make sure the second region is still accessible after the first
102 // region is unmapped.
103 for (size_t i = 0; i < page_size; ++i) {
104 EXPECT_EQ(base1[i], 43);
105 }
106 delete m1;
107 }
Andreas Gamped8f26db2014-05-19 17:01:13 -0700108
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700109 void CommonInit() {
110 MemMap::Init();
111 }
112
Andreas Gamped8f26db2014-05-19 17:01:13 -0700113#if defined(__LP64__) && !defined(__x86_64__)
114 static uintptr_t GetLinearScanPos() {
115 return MemMap::next_mem_pos_;
116 }
117#endif
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700118};
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700119
Andreas Gamped8f26db2014-05-19 17:01:13 -0700120#if defined(__LP64__) && !defined(__x86_64__)
121
122#ifdef __BIONIC__
123extern uintptr_t CreateStartPos(uint64_t input);
124#endif
125
126TEST_F(MemMapTest, Start) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700127 CommonInit();
Andreas Gamped8f26db2014-05-19 17:01:13 -0700128 uintptr_t start = GetLinearScanPos();
129 EXPECT_LE(64 * KB, start);
130 EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));
Andreas Gamped8f26db2014-05-19 17:01:13 -0700131#ifdef __BIONIC__
132 // Test a couple of values. Make sure they are different.
133 uintptr_t last = 0;
134 for (size_t i = 0; i < 100; ++i) {
135 uintptr_t random_start = CreateStartPos(i * kPageSize);
136 EXPECT_NE(last, random_start);
137 last = random_start;
138 }
139
140 // Even on max, should be below ART_BASE_ADDRESS.
141 EXPECT_LT(CreateStartPos(~0), static_cast<uintptr_t>(ART_BASE_ADDRESS));
142#endif
143 // End of test.
144}
145#endif
146
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700147TEST_F(MemMapTest, MapAnonymousEmpty) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700148 CommonInit();
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700149 std::string error_msg;
Ian Rogers700a4022014-05-19 16:49:03 -0700150 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
Vladimir Marko5c42c292015-02-25 12:02:49 +0000151 nullptr,
152 0,
153 PROT_READ,
154 false,
155 false,
156 &error_msg));
Ian Rogersef7d42f2014-01-06 12:55:46 -0800157 ASSERT_TRUE(map.get() != nullptr) << error_msg;
158 ASSERT_TRUE(error_msg.empty());
159 map.reset(MemMap::MapAnonymous("MapAnonymousEmpty",
160 nullptr,
161 kPageSize,
162 PROT_READ | PROT_WRITE,
163 false,
Vladimir Marko5c42c292015-02-25 12:02:49 +0000164 false,
Ian Rogersef7d42f2014-01-06 12:55:46 -0800165 &error_msg));
166 ASSERT_TRUE(map.get() != nullptr) << error_msg;
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700167 ASSERT_TRUE(error_msg.empty());
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700168}
169
Mathieu Chartier486932a2016-02-24 10:09:23 -0800170TEST_F(MemMapTest, MapAnonymousFailNullError) {
171 CommonInit();
172 // Test that we don't crash with a null error_str when mapping at an invalid location.
173 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousInvalid",
174 reinterpret_cast<uint8_t*>(kPageSize),
175 0x20000,
176 PROT_READ | PROT_WRITE,
177 false,
178 false,
179 nullptr));
180 ASSERT_EQ(nullptr, map.get());
181}
182
Ian Rogersef7d42f2014-01-06 12:55:46 -0800183#ifdef __LP64__
184TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700185 CommonInit();
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700186 std::string error_msg;
Ian Rogers700a4022014-05-19 16:49:03 -0700187 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
Vladimir Marko5c42c292015-02-25 12:02:49 +0000188 nullptr,
189 kPageSize,
190 PROT_READ | PROT_WRITE,
191 true,
192 false,
193 &error_msg));
Ian Rogersef7d42f2014-01-06 12:55:46 -0800194 ASSERT_TRUE(map.get() != nullptr) << error_msg;
195 ASSERT_TRUE(error_msg.empty());
196 ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700197}
Mathieu Chartier42bddce2015-11-09 15:16:56 -0800198TEST_F(MemMapTest, MapFile32Bit) {
199 CommonInit();
200 std::string error_msg;
201 ScratchFile scratch_file;
202 constexpr size_t kMapSize = kPageSize;
203 std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
204 ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
205 std::unique_ptr<MemMap> map(MemMap::MapFile(/*byte_count*/kMapSize,
206 PROT_READ,
207 MAP_PRIVATE,
208 scratch_file.GetFd(),
209 /*start*/0,
210 /*low_4gb*/true,
211 scratch_file.GetFilename().c_str(),
212 &error_msg));
213 ASSERT_TRUE(map != nullptr) << error_msg;
214 ASSERT_TRUE(error_msg.empty());
215 ASSERT_EQ(map->Size(), kMapSize);
216 ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
217}
Ian Rogersef7d42f2014-01-06 12:55:46 -0800218#endif
219
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700220TEST_F(MemMapTest, MapAnonymousExactAddr) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700221 CommonInit();
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700222 std::string error_msg;
Mathieu Chartier16d29f82015-11-10 10:32:52 -0800223 // Find a valid address.
224 uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb*/false);
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700225 // Map at an address that should work, which should succeed.
Ian Rogers700a4022014-05-19 16:49:03 -0700226 std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
Mathieu Chartier16d29f82015-11-10 10:32:52 -0800227 valid_address,
Vladimir Marko5c42c292015-02-25 12:02:49 +0000228 kPageSize,
229 PROT_READ | PROT_WRITE,
230 false,
231 false,
232 &error_msg));
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700233 ASSERT_TRUE(map0.get() != nullptr) << error_msg;
234 ASSERT_TRUE(error_msg.empty());
Mathieu Chartier16d29f82015-11-10 10:32:52 -0800235 ASSERT_TRUE(map0->BaseBegin() == valid_address);
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700236 // Map at an unspecified address, which should succeed.
Ian Rogers700a4022014-05-19 16:49:03 -0700237 std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
Vladimir Marko5c42c292015-02-25 12:02:49 +0000238 nullptr,
239 kPageSize,
240 PROT_READ | PROT_WRITE,
241 false,
242 false,
243 &error_msg));
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700244 ASSERT_TRUE(map1.get() != nullptr) << error_msg;
245 ASSERT_TRUE(error_msg.empty());
246 ASSERT_TRUE(map1->BaseBegin() != nullptr);
247 // Attempt to map at the same address, which should fail.
Ian Rogers700a4022014-05-19 16:49:03 -0700248 std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
Vladimir Marko5c42c292015-02-25 12:02:49 +0000249 reinterpret_cast<uint8_t*>(map1->BaseBegin()),
250 kPageSize,
251 PROT_READ | PROT_WRITE,
252 false,
253 false,
254 &error_msg));
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700255 ASSERT_TRUE(map2.get() == nullptr) << error_msg;
256 ASSERT_TRUE(!error_msg.empty());
257}
258
Ian Rogersef7d42f2014-01-06 12:55:46 -0800259TEST_F(MemMapTest, RemapAtEnd) {
260 RemapAtEndTest(false);
261}
262
263#ifdef __LP64__
264TEST_F(MemMapTest, RemapAtEnd32bit) {
265 RemapAtEndTest(true);
266}
267#endif
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700268
Qiming Shi84d49cc2014-04-24 15:38:41 +0800269TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
Roland Levillain14306b82016-01-20 12:13:57 +0000270 // Some MIPS32 hardware (namely the Creator Ci20 development board)
271 // cannot allocate in the 2GB-4GB region.
272 TEST_DISABLED_FOR_MIPS();
273
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700274 CommonInit();
Andreas Gampe928f72b2014-09-09 19:53:48 -0700275 // This test may not work under valgrind.
Evgenii Stepanov1e133742015-05-20 12:30:59 -0700276 if (RUNNING_ON_MEMORY_TOOL == 0) {
Mathieu Chartier16d29f82015-11-10 10:32:52 -0800277 constexpr size_t size = 0x100000;
278 // Try all addresses starting from 2GB to 4GB.
279 size_t start_addr = 2 * GB;
Andreas Gampe928f72b2014-09-09 19:53:48 -0700280 std::string error_msg;
Mathieu Chartier16d29f82015-11-10 10:32:52 -0800281 std::unique_ptr<MemMap> map;
282 for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
283 map.reset(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
284 reinterpret_cast<uint8_t*>(start_addr),
285 size,
286 PROT_READ | PROT_WRITE,
287 /*low_4gb*/true,
288 false,
289 &error_msg));
290 if (map != nullptr) {
291 break;
292 }
293 }
Andreas Gampe928f72b2014-09-09 19:53:48 -0700294 ASSERT_TRUE(map.get() != nullptr) << error_msg;
Roland Levillain8d026442016-01-19 17:30:33 +0000295 ASSERT_GE(reinterpret_cast<uintptr_t>(map->End()), 2u * GB);
Andreas Gampe928f72b2014-09-09 19:53:48 -0700296 ASSERT_TRUE(error_msg.empty());
Mathieu Chartier16d29f82015-11-10 10:32:52 -0800297 ASSERT_EQ(BaseBegin(map.get()), reinterpret_cast<void*>(start_addr));
Andreas Gampe928f72b2014-09-09 19:53:48 -0700298 }
Qiming Shi84d49cc2014-04-24 15:38:41 +0800299}
300
301TEST_F(MemMapTest, MapAnonymousOverflow) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700302 CommonInit();
Qiming Shi84d49cc2014-04-24 15:38:41 +0800303 std::string error_msg;
304 uintptr_t ptr = 0;
305 ptr -= kPageSize; // Now it's close to the top.
Ian Rogers700a4022014-05-19 16:49:03 -0700306 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
Vladimir Marko5c42c292015-02-25 12:02:49 +0000307 reinterpret_cast<uint8_t*>(ptr),
308 2 * kPageSize, // brings it over the top.
309 PROT_READ | PROT_WRITE,
310 false,
311 false,
312 &error_msg));
Qiming Shi84d49cc2014-04-24 15:38:41 +0800313 ASSERT_EQ(nullptr, map.get());
314 ASSERT_FALSE(error_msg.empty());
315}
316
317#ifdef __LP64__
318TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700319 CommonInit();
Qiming Shi84d49cc2014-04-24 15:38:41 +0800320 std::string error_msg;
Vladimir Marko5c42c292015-02-25 12:02:49 +0000321 std::unique_ptr<MemMap> map(
322 MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
323 reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
324 kPageSize,
325 PROT_READ | PROT_WRITE,
326 true,
327 false,
328 &error_msg));
Qiming Shi84d49cc2014-04-24 15:38:41 +0800329 ASSERT_EQ(nullptr, map.get());
330 ASSERT_FALSE(error_msg.empty());
331}
332
333TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700334 CommonInit();
Qiming Shi84d49cc2014-04-24 15:38:41 +0800335 std::string error_msg;
Ian Rogers700a4022014-05-19 16:49:03 -0700336 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
Vladimir Marko5c42c292015-02-25 12:02:49 +0000337 reinterpret_cast<uint8_t*>(0xF0000000),
338 0x20000000,
339 PROT_READ | PROT_WRITE,
340 true,
341 false,
342 &error_msg));
Qiming Shi84d49cc2014-04-24 15:38:41 +0800343 ASSERT_EQ(nullptr, map.get());
344 ASSERT_FALSE(error_msg.empty());
345}
346#endif
347
Vladimir Marko5c42c292015-02-25 12:02:49 +0000348TEST_F(MemMapTest, MapAnonymousReuse) {
349 CommonInit();
350 std::string error_msg;
351 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousReserve",
352 nullptr,
353 0x20000,
354 PROT_READ | PROT_WRITE,
355 false,
356 false,
357 &error_msg));
358 ASSERT_NE(nullptr, map.get());
359 ASSERT_TRUE(error_msg.empty());
360 std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymousReused",
361 reinterpret_cast<uint8_t*>(map->BaseBegin()),
362 0x10000,
363 PROT_READ | PROT_WRITE,
364 false,
365 true,
366 &error_msg));
367 ASSERT_NE(nullptr, map2.get());
368 ASSERT_TRUE(error_msg.empty());
369}
370
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700371TEST_F(MemMapTest, CheckNoGaps) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700372 CommonInit();
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700373 std::string error_msg;
374 constexpr size_t kNumPages = 3;
375 // Map a 3-page mem map.
376 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymous0",
377 nullptr,
378 kPageSize * kNumPages,
379 PROT_READ | PROT_WRITE,
380 false,
Vladimir Marko5c42c292015-02-25 12:02:49 +0000381 false,
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700382 &error_msg));
383 ASSERT_TRUE(map.get() != nullptr) << error_msg;
384 ASSERT_TRUE(error_msg.empty());
385 // Record the base address.
Ian Rogers13735952014-10-08 12:43:28 -0700386 uint8_t* map_base = reinterpret_cast<uint8_t*>(map->BaseBegin());
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700387 // Unmap it.
388 map.reset();
389
390 // Map at the same address, but in page-sized separate mem maps,
391 // assuming the space at the address is still available.
392 std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
393 map_base,
394 kPageSize,
395 PROT_READ | PROT_WRITE,
396 false,
Vladimir Marko5c42c292015-02-25 12:02:49 +0000397 false,
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700398 &error_msg));
399 ASSERT_TRUE(map0.get() != nullptr) << error_msg;
400 ASSERT_TRUE(error_msg.empty());
401 std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
402 map_base + kPageSize,
403 kPageSize,
404 PROT_READ | PROT_WRITE,
405 false,
Vladimir Marko5c42c292015-02-25 12:02:49 +0000406 false,
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700407 &error_msg));
408 ASSERT_TRUE(map1.get() != nullptr) << error_msg;
409 ASSERT_TRUE(error_msg.empty());
410 std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
411 map_base + kPageSize * 2,
412 kPageSize,
413 PROT_READ | PROT_WRITE,
414 false,
Vladimir Marko5c42c292015-02-25 12:02:49 +0000415 false,
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700416 &error_msg));
417 ASSERT_TRUE(map2.get() != nullptr) << error_msg;
418 ASSERT_TRUE(error_msg.empty());
419
420 // One-map cases.
421 ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map0.get()));
422 ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map1.get()));
423 ASSERT_TRUE(MemMap::CheckNoGaps(map2.get(), map2.get()));
424
425 // Two or three-map cases.
426 ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map1.get()));
427 ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map2.get()));
428 ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map2.get()));
429
430 // Unmap the middle one.
431 map1.reset();
432
433 // Should return false now that there's a gap in the middle.
434 ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
435}
436
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800437TEST_F(MemMapTest, AlignBy) {
438 CommonInit();
439 std::string error_msg;
440 // Cast the page size to size_t.
441 const size_t page_size = static_cast<size_t>(kPageSize);
442 // Map a region.
443 std::unique_ptr<MemMap> m0(MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
444 nullptr,
445 14 * page_size,
446 PROT_READ | PROT_WRITE,
447 false,
448 false,
449 &error_msg));
450 uint8_t* base0 = m0->Begin();
451 ASSERT_TRUE(base0 != nullptr) << error_msg;
452 ASSERT_EQ(m0->Size(), 14 * page_size);
453 ASSERT_EQ(BaseBegin(m0.get()), base0);
454 ASSERT_EQ(BaseSize(m0.get()), m0->Size());
455
456 // Break it into several regions by using RemapAtEnd.
457 std::unique_ptr<MemMap> m1(m0->RemapAtEnd(base0 + 3 * page_size,
458 "MemMapTest_AlignByTest_map1",
459 PROT_READ | PROT_WRITE,
David Sehrd1dbb742017-07-17 11:20:38 -0700460 MAP_PRIVATE,
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800461 &error_msg));
462 uint8_t* base1 = m1->Begin();
463 ASSERT_TRUE(base1 != nullptr) << error_msg;
464 ASSERT_EQ(base1, base0 + 3 * page_size);
465 ASSERT_EQ(m0->Size(), 3 * page_size);
466
467 std::unique_ptr<MemMap> m2(m1->RemapAtEnd(base1 + 4 * page_size,
468 "MemMapTest_AlignByTest_map2",
469 PROT_READ | PROT_WRITE,
David Sehrd1dbb742017-07-17 11:20:38 -0700470 MAP_PRIVATE,
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800471 &error_msg));
472 uint8_t* base2 = m2->Begin();
473 ASSERT_TRUE(base2 != nullptr) << error_msg;
474 ASSERT_EQ(base2, base1 + 4 * page_size);
475 ASSERT_EQ(m1->Size(), 4 * page_size);
476
477 std::unique_ptr<MemMap> m3(m2->RemapAtEnd(base2 + 3 * page_size,
478 "MemMapTest_AlignByTest_map1",
479 PROT_READ | PROT_WRITE,
David Sehrd1dbb742017-07-17 11:20:38 -0700480 MAP_PRIVATE,
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800481 &error_msg));
482 uint8_t* base3 = m3->Begin();
483 ASSERT_TRUE(base3 != nullptr) << error_msg;
484 ASSERT_EQ(base3, base2 + 3 * page_size);
485 ASSERT_EQ(m2->Size(), 3 * page_size);
486 ASSERT_EQ(m3->Size(), 4 * page_size);
487
488 uint8_t* end0 = base0 + m0->Size();
489 uint8_t* end1 = base1 + m1->Size();
490 uint8_t* end2 = base2 + m2->Size();
491 uint8_t* end3 = base3 + m3->Size();
492
493 ASSERT_EQ(static_cast<size_t>(end3 - base0), 14 * page_size);
494
495 if (IsAlignedParam(base0, 2 * page_size)) {
496 ASSERT_FALSE(IsAlignedParam(base1, 2 * page_size));
497 ASSERT_FALSE(IsAlignedParam(base2, 2 * page_size));
498 ASSERT_TRUE(IsAlignedParam(base3, 2 * page_size));
499 ASSERT_TRUE(IsAlignedParam(end3, 2 * page_size));
500 } else {
501 ASSERT_TRUE(IsAlignedParam(base1, 2 * page_size));
502 ASSERT_TRUE(IsAlignedParam(base2, 2 * page_size));
503 ASSERT_FALSE(IsAlignedParam(base3, 2 * page_size));
504 ASSERT_FALSE(IsAlignedParam(end3, 2 * page_size));
505 }
506
507 // Align by 2 * page_size;
508 m0->AlignBy(2 * page_size);
509 m1->AlignBy(2 * page_size);
510 m2->AlignBy(2 * page_size);
511 m3->AlignBy(2 * page_size);
512
513 EXPECT_TRUE(IsAlignedParam(m0->Begin(), 2 * page_size));
514 EXPECT_TRUE(IsAlignedParam(m1->Begin(), 2 * page_size));
515 EXPECT_TRUE(IsAlignedParam(m2->Begin(), 2 * page_size));
516 EXPECT_TRUE(IsAlignedParam(m3->Begin(), 2 * page_size));
517
518 EXPECT_TRUE(IsAlignedParam(m0->Begin() + m0->Size(), 2 * page_size));
519 EXPECT_TRUE(IsAlignedParam(m1->Begin() + m1->Size(), 2 * page_size));
520 EXPECT_TRUE(IsAlignedParam(m2->Begin() + m2->Size(), 2 * page_size));
521 EXPECT_TRUE(IsAlignedParam(m3->Begin() + m3->Size(), 2 * page_size));
522
523 if (IsAlignedParam(base0, 2 * page_size)) {
524 EXPECT_EQ(m0->Begin(), base0);
525 EXPECT_EQ(m0->Begin() + m0->Size(), end0 - page_size);
526 EXPECT_EQ(m1->Begin(), base1 + page_size);
527 EXPECT_EQ(m1->Begin() + m1->Size(), end1 - page_size);
528 EXPECT_EQ(m2->Begin(), base2 + page_size);
529 EXPECT_EQ(m2->Begin() + m2->Size(), end2);
530 EXPECT_EQ(m3->Begin(), base3);
531 EXPECT_EQ(m3->Begin() + m3->Size(), end3);
532 } else {
533 EXPECT_EQ(m0->Begin(), base0 + page_size);
534 EXPECT_EQ(m0->Begin() + m0->Size(), end0);
535 EXPECT_EQ(m1->Begin(), base1);
536 EXPECT_EQ(m1->Begin() + m1->Size(), end1);
537 EXPECT_EQ(m2->Begin(), base2);
538 EXPECT_EQ(m2->Begin() + m2->Size(), end2 - page_size);
539 EXPECT_EQ(m3->Begin(), base3 + page_size);
540 EXPECT_EQ(m3->Begin() + m3->Size(), end3 - page_size);
541 }
542}
543
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700544} // namespace art