blob: 5f027b11050af839f3b9c62ba64fb8aab53b446f [file] [log] [blame]
Brian Carlstrom9004cb62013-07-26 15:48:31 -07001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "mem_map.h"
18
Andreas Gamped4901292017-05-30 18:41:34 -070019#include <sys/mman.h>
20
Ian Rogers700a4022014-05-19 16:49:03 -070021#include <memory>
22
Mathieu Chartier42bddce2015-11-09 15:16:56 -080023#include "common_runtime_test.h"
Evgenii Stepanov1e133742015-05-20 12:30:59 -070024#include "base/memory_tool.h"
Mathieu Chartier42bddce2015-11-09 15:16:56 -080025#include "base/unix_file/fd_file.h"
Brian Carlstrom9004cb62013-07-26 15:48:31 -070026
27namespace art {
28
Mathieu Chartier42bddce2015-11-09 15:16:56 -080029class MemMapTest : public CommonRuntimeTest {
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070030 public:
Ian Rogers13735952014-10-08 12:43:28 -070031 static uint8_t* BaseBegin(MemMap* mem_map) {
32 return reinterpret_cast<uint8_t*>(mem_map->base_begin_);
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070033 }
Mathieu Chartier16d29f82015-11-10 10:32:52 -080034
Ian Rogersef7d42f2014-01-06 12:55:46 -080035 static size_t BaseSize(MemMap* mem_map) {
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070036 return mem_map->base_size_;
37 }
Ian Rogersef7d42f2014-01-06 12:55:46 -080038
Mathieu Chartier16d29f82015-11-10 10:32:52 -080039 static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
40 // Find a valid map address and unmap it before returning.
41 std::string error_msg;
42 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("temp",
43 nullptr,
44 size,
45 PROT_READ,
46 low_4gb,
47 false,
48 &error_msg));
49 CHECK(map != nullptr);
50 return map->Begin();
51 }
52
Ian Rogersef7d42f2014-01-06 12:55:46 -080053 static void RemapAtEndTest(bool low_4gb) {
54 std::string error_msg;
55 // Cast the page size to size_t.
56 const size_t page_size = static_cast<size_t>(kPageSize);
57 // Map a two-page memory region.
58 MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
59 nullptr,
60 2 * page_size,
61 PROT_READ | PROT_WRITE,
62 low_4gb,
Vladimir Marko5c42c292015-02-25 12:02:49 +000063 false,
Ian Rogersef7d42f2014-01-06 12:55:46 -080064 &error_msg);
65 // Check its state and write to it.
Ian Rogers13735952014-10-08 12:43:28 -070066 uint8_t* base0 = m0->Begin();
Ian Rogersef7d42f2014-01-06 12:55:46 -080067 ASSERT_TRUE(base0 != nullptr) << error_msg;
68 size_t size0 = m0->Size();
69 EXPECT_EQ(m0->Size(), 2 * page_size);
70 EXPECT_EQ(BaseBegin(m0), base0);
71 EXPECT_EQ(BaseSize(m0), size0);
72 memset(base0, 42, 2 * page_size);
73 // Remap the latter half into a second MemMap.
74 MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
75 "MemMapTest_RemapAtEndTest_map1",
76 PROT_READ | PROT_WRITE,
77 &error_msg);
78 // Check the states of the two maps.
79 EXPECT_EQ(m0->Begin(), base0) << error_msg;
80 EXPECT_EQ(m0->Size(), page_size);
81 EXPECT_EQ(BaseBegin(m0), base0);
82 EXPECT_EQ(BaseSize(m0), page_size);
Ian Rogers13735952014-10-08 12:43:28 -070083 uint8_t* base1 = m1->Begin();
Ian Rogersef7d42f2014-01-06 12:55:46 -080084 size_t size1 = m1->Size();
85 EXPECT_EQ(base1, base0 + page_size);
86 EXPECT_EQ(size1, page_size);
87 EXPECT_EQ(BaseBegin(m1), base1);
88 EXPECT_EQ(BaseSize(m1), size1);
89 // Write to the second region.
90 memset(base1, 43, page_size);
91 // Check the contents of the two regions.
92 for (size_t i = 0; i < page_size; ++i) {
93 EXPECT_EQ(base0[i], 42);
94 }
95 for (size_t i = 0; i < page_size; ++i) {
96 EXPECT_EQ(base1[i], 43);
97 }
98 // Unmap the first region.
99 delete m0;
100 // Make sure the second region is still accessible after the first
101 // region is unmapped.
102 for (size_t i = 0; i < page_size; ++i) {
103 EXPECT_EQ(base1[i], 43);
104 }
105 delete m1;
106 }
Andreas Gamped8f26db2014-05-19 17:01:13 -0700107
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700108 void CommonInit() {
109 MemMap::Init();
110 }
111
Andreas Gamped8f26db2014-05-19 17:01:13 -0700112#if defined(__LP64__) && !defined(__x86_64__)
113 static uintptr_t GetLinearScanPos() {
114 return MemMap::next_mem_pos_;
115 }
116#endif
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700117};
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700118
Andreas Gamped8f26db2014-05-19 17:01:13 -0700119#if defined(__LP64__) && !defined(__x86_64__)
120
121#ifdef __BIONIC__
122extern uintptr_t CreateStartPos(uint64_t input);
123#endif
124
// Verifies the initial position of the low-memory linear scan allocator:
// it must start above the first 64 KiB (never hand out page zero's
// neighborhood) yet below ART_BASE_ADDRESS.
TEST_F(MemMapTest, Start) {
  CommonInit();
  uintptr_t start = GetLinearScanPos();
  EXPECT_LE(64 * KB, start);
  EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));
#ifdef __BIONIC__
  // Test a couple of values. Make sure they are different, i.e. that the
  // start position is actually randomized per input.
  uintptr_t last = 0;
  for (size_t i = 0; i < 100; ++i) {
    uintptr_t random_start = CreateStartPos(i * kPageSize);
    EXPECT_NE(last, random_start);
    last = random_start;
  }

  // Even on max, should be below ART_BASE_ADDRESS.
  EXPECT_LT(CreateStartPos(~0), static_cast<uintptr_t>(ART_BASE_ADDRESS));
#endif
  // End of test.
}
144#endif
145
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700146TEST_F(MemMapTest, MapAnonymousEmpty) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700147 CommonInit();
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700148 std::string error_msg;
Ian Rogers700a4022014-05-19 16:49:03 -0700149 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
Vladimir Marko5c42c292015-02-25 12:02:49 +0000150 nullptr,
151 0,
152 PROT_READ,
153 false,
154 false,
155 &error_msg));
Ian Rogersef7d42f2014-01-06 12:55:46 -0800156 ASSERT_TRUE(map.get() != nullptr) << error_msg;
157 ASSERT_TRUE(error_msg.empty());
158 map.reset(MemMap::MapAnonymous("MapAnonymousEmpty",
159 nullptr,
160 kPageSize,
161 PROT_READ | PROT_WRITE,
162 false,
Vladimir Marko5c42c292015-02-25 12:02:49 +0000163 false,
Ian Rogersef7d42f2014-01-06 12:55:46 -0800164 &error_msg));
165 ASSERT_TRUE(map.get() != nullptr) << error_msg;
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700166 ASSERT_TRUE(error_msg.empty());
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700167}
168
Mathieu Chartier486932a2016-02-24 10:09:23 -0800169TEST_F(MemMapTest, MapAnonymousFailNullError) {
170 CommonInit();
171 // Test that we don't crash with a null error_str when mapping at an invalid location.
172 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousInvalid",
173 reinterpret_cast<uint8_t*>(kPageSize),
174 0x20000,
175 PROT_READ | PROT_WRITE,
176 false,
177 false,
178 nullptr));
179 ASSERT_EQ(nullptr, map.get());
180}
181
Ian Rogersef7d42f2014-01-06 12:55:46 -0800182#ifdef __LP64__
183TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700184 CommonInit();
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700185 std::string error_msg;
Ian Rogers700a4022014-05-19 16:49:03 -0700186 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
Vladimir Marko5c42c292015-02-25 12:02:49 +0000187 nullptr,
188 kPageSize,
189 PROT_READ | PROT_WRITE,
190 true,
191 false,
192 &error_msg));
Ian Rogersef7d42f2014-01-06 12:55:46 -0800193 ASSERT_TRUE(map.get() != nullptr) << error_msg;
194 ASSERT_TRUE(error_msg.empty());
195 ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700196}
Mathieu Chartier42bddce2015-11-09 15:16:56 -0800197TEST_F(MemMapTest, MapFile32Bit) {
198 CommonInit();
199 std::string error_msg;
200 ScratchFile scratch_file;
201 constexpr size_t kMapSize = kPageSize;
202 std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
203 ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
204 std::unique_ptr<MemMap> map(MemMap::MapFile(/*byte_count*/kMapSize,
205 PROT_READ,
206 MAP_PRIVATE,
207 scratch_file.GetFd(),
208 /*start*/0,
209 /*low_4gb*/true,
210 scratch_file.GetFilename().c_str(),
211 &error_msg));
212 ASSERT_TRUE(map != nullptr) << error_msg;
213 ASSERT_TRUE(error_msg.empty());
214 ASSERT_EQ(map->Size(), kMapSize);
215 ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
216}
Ian Rogersef7d42f2014-01-06 12:55:46 -0800217#endif
218
// Exact-address (non-reuse) semantics: mapping at a free address succeeds,
// mapping at an already-occupied address fails with an error message.
TEST_F(MemMapTest, MapAnonymousExactAddr) {
  CommonInit();
  std::string error_msg;
  // Find a valid address (probed and immediately unmapped; assumed still free
  // when we map below -- racy in principle, see GetValidMapAddress).
  uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb*/false);
  // Map at an address that should work, which should succeed.
  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
                                                    valid_address,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  // The map must be exactly at the requested address, not just nearby.
  ASSERT_TRUE(map0->BaseBegin() == valid_address);
  // Map at an unspecified address, which should succeed.
  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
                                                    nullptr,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map1->BaseBegin() != nullptr);
  // Attempt to map at the same address (while map1 is still alive), which
  // should fail because reuse is false.
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
                                                    reinterpret_cast<uint8_t*>(map1->BaseBegin()),
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map2.get() == nullptr) << error_msg;
  // The failure must come with a populated error message.
  ASSERT_TRUE(!error_msg.empty());
}
257
// Runs the shared remap-at-end scenario without the low-4GiB constraint.
TEST_F(MemMapTest, RemapAtEnd) {
  RemapAtEndTest(false);
}
261
262#ifdef __LP64__
// Runs the shared remap-at-end scenario with the low-4GiB constraint
// (only meaningful on 64-bit hosts, hence the enclosing __LP64__ guard).
TEST_F(MemMapTest, RemapAtEnd32bit) {
  RemapAtEndTest(true);
}
266#endif
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700267
// Checks that an exact-address low_4gb mapping can be placed in the upper
// half (2 GiB - 4 GiB) of the 32-bit address space.
TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
  // Some MIPS32 hardware (namely the Creator Ci20 development board)
  // cannot allocate in the 2GB-4GB region.
  TEST_DISABLED_FOR_MIPS();

  CommonInit();
  // This test may not work under valgrind.
  if (RUNNING_ON_MEMORY_TOOL == 0) {
    constexpr size_t size = 0x100000;
    // Try all addresses starting from 2GB to 4GB, stepping by the map size,
    // until one of the exact-address requests succeeds.
    size_t start_addr = 2 * GB;
    std::string error_msg;
    std::unique_ptr<MemMap> map;
    for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
      map.reset(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
                                     reinterpret_cast<uint8_t*>(start_addr),
                                     size,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb*/true,
                                     false,
                                     &error_msg));
      if (map != nullptr) {
        break;
      }
    }
    ASSERT_TRUE(map.get() != nullptr) << error_msg;
    // The map must end above 2 GiB (i.e. it really lives in the high half).
    ASSERT_GE(reinterpret_cast<uintptr_t>(map->End()), 2u * GB);
    ASSERT_TRUE(error_msg.empty());
    // And it must sit exactly at the last requested address.
    ASSERT_EQ(BaseBegin(map.get()), reinterpret_cast<void*>(start_addr));
  }
}
299
300TEST_F(MemMapTest, MapAnonymousOverflow) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700301 CommonInit();
Qiming Shi84d49cc2014-04-24 15:38:41 +0800302 std::string error_msg;
303 uintptr_t ptr = 0;
304 ptr -= kPageSize; // Now it's close to the top.
Ian Rogers700a4022014-05-19 16:49:03 -0700305 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
Vladimir Marko5c42c292015-02-25 12:02:49 +0000306 reinterpret_cast<uint8_t*>(ptr),
307 2 * kPageSize, // brings it over the top.
308 PROT_READ | PROT_WRITE,
309 false,
310 false,
311 &error_msg));
Qiming Shi84d49cc2014-04-24 15:38:41 +0800312 ASSERT_EQ(nullptr, map.get());
313 ASSERT_FALSE(error_msg.empty());
314}
315
316#ifdef __LP64__
317TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700318 CommonInit();
Qiming Shi84d49cc2014-04-24 15:38:41 +0800319 std::string error_msg;
Vladimir Marko5c42c292015-02-25 12:02:49 +0000320 std::unique_ptr<MemMap> map(
321 MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
322 reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
323 kPageSize,
324 PROT_READ | PROT_WRITE,
325 true,
326 false,
327 &error_msg));
Qiming Shi84d49cc2014-04-24 15:38:41 +0800328 ASSERT_EQ(nullptr, map.get());
329 ASSERT_FALSE(error_msg.empty());
330}
331
332TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700333 CommonInit();
Qiming Shi84d49cc2014-04-24 15:38:41 +0800334 std::string error_msg;
Ian Rogers700a4022014-05-19 16:49:03 -0700335 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
Vladimir Marko5c42c292015-02-25 12:02:49 +0000336 reinterpret_cast<uint8_t*>(0xF0000000),
337 0x20000000,
338 PROT_READ | PROT_WRITE,
339 true,
340 false,
341 &error_msg));
Qiming Shi84d49cc2014-04-24 15:38:41 +0800342 ASSERT_EQ(nullptr, map.get());
343 ASSERT_FALSE(error_msg.empty());
344}
345#endif
346
Vladimir Marko5c42c292015-02-25 12:02:49 +0000347TEST_F(MemMapTest, MapAnonymousReuse) {
348 CommonInit();
349 std::string error_msg;
350 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousReserve",
351 nullptr,
352 0x20000,
353 PROT_READ | PROT_WRITE,
354 false,
355 false,
356 &error_msg));
357 ASSERT_NE(nullptr, map.get());
358 ASSERT_TRUE(error_msg.empty());
359 std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymousReused",
360 reinterpret_cast<uint8_t*>(map->BaseBegin()),
361 0x10000,
362 PROT_READ | PROT_WRITE,
363 false,
364 true,
365 &error_msg));
366 ASSERT_NE(nullptr, map2.get());
367 ASSERT_TRUE(error_msg.empty());
368}
369
// Exercises MemMap::CheckNoGaps: three page-sized maps placed back-to-back
// are gap-free in every combination; unmapping the middle one introduces a
// gap that CheckNoGaps must detect.
TEST_F(MemMapTest, CheckNoGaps) {
  CommonInit();
  std::string error_msg;
  constexpr size_t kNumPages = 3;
  // Map a 3-page mem map.
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymous0",
                                                   nullptr,
                                                   kPageSize * kNumPages,
                                                   PROT_READ | PROT_WRITE,
                                                   false,
                                                   false,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  // Record the base address.
  uint8_t* map_base = reinterpret_cast<uint8_t*>(map->BaseBegin());
  // Unmap it.
  map.reset();

  // Map at the same address, but in page-sized separate mem maps,
  // assuming the space at the address is still available (racy in
  // principle: another mapping could claim it in between).
  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
                                                    map_base,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
                                                    map_base + kPageSize,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
                                                    map_base + kPageSize * 2,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map2.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());

  // One-map cases (a map is trivially gap-free with itself).
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map0.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map1.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map2.get(), map2.get()));

  // Two or three-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map1.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map2.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map2.get()));

  // Unmap the middle one.
  map1.reset();

  // Should return false now that there's a gap in the middle.
  ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
}
435
// Exercises MemMap::AlignBy: a 14-page region is split into maps of
// 3 + 4 + 3 + 4 pages, each map is then aligned to a 2-page boundary, and
// the expected trimming at each map's begin/end is checked. The expectations
// branch on whether base0 itself started 2-page aligned, since that fixes
// the alignment parity of every subsequent split point.
TEST_F(MemMapTest, AlignBy) {
  CommonInit();
  std::string error_msg;
  // Cast the page size to size_t.
  const size_t page_size = static_cast<size_t>(kPageSize);
  // Map a region.
  std::unique_ptr<MemMap> m0(MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
                                                  nullptr,
                                                  14 * page_size,
                                                  PROT_READ | PROT_WRITE,
                                                  false,
                                                  false,
                                                  &error_msg));
  uint8_t* base0 = m0->Begin();
  ASSERT_TRUE(base0 != nullptr) << error_msg;
  ASSERT_EQ(m0->Size(), 14 * page_size);
  ASSERT_EQ(BaseBegin(m0.get()), base0);
  ASSERT_EQ(BaseSize(m0.get()), m0->Size());

  // Break it into several regions by using RemapAtEnd.
  // m0 keeps pages [0, 3), m1 takes [3, 14).
  std::unique_ptr<MemMap> m1(m0->RemapAtEnd(base0 + 3 * page_size,
                                            "MemMapTest_AlignByTest_map1",
                                            PROT_READ | PROT_WRITE,
                                            &error_msg));
  uint8_t* base1 = m1->Begin();
  ASSERT_TRUE(base1 != nullptr) << error_msg;
  ASSERT_EQ(base1, base0 + 3 * page_size);
  ASSERT_EQ(m0->Size(), 3 * page_size);

  // m1 keeps pages [3, 7), m2 takes [7, 14).
  std::unique_ptr<MemMap> m2(m1->RemapAtEnd(base1 + 4 * page_size,
                                            "MemMapTest_AlignByTest_map2",
                                            PROT_READ | PROT_WRITE,
                                            &error_msg));
  uint8_t* base2 = m2->Begin();
  ASSERT_TRUE(base2 != nullptr) << error_msg;
  ASSERT_EQ(base2, base1 + 4 * page_size);
  ASSERT_EQ(m1->Size(), 4 * page_size);

  // m2 keeps pages [7, 10), m3 takes [10, 14).
  std::unique_ptr<MemMap> m3(m2->RemapAtEnd(base2 + 3 * page_size,
                                            "MemMapTest_AlignByTest_map1",
                                            PROT_READ | PROT_WRITE,
                                            &error_msg));
  uint8_t* base3 = m3->Begin();
  ASSERT_TRUE(base3 != nullptr) << error_msg;
  ASSERT_EQ(base3, base2 + 3 * page_size);
  ASSERT_EQ(m2->Size(), 3 * page_size);
  ASSERT_EQ(m3->Size(), 4 * page_size);

  uint8_t* end0 = base0 + m0->Size();
  uint8_t* end1 = base1 + m1->Size();
  uint8_t* end2 = base2 + m2->Size();
  uint8_t* end3 = base3 + m3->Size();

  // The four maps together still cover the original 14 pages.
  ASSERT_EQ(static_cast<size_t>(end3 - base0), 14 * page_size);

  // Sanity-check the pre-alignment parity of each split point.
  if (IsAlignedParam(base0, 2 * page_size)) {
    ASSERT_FALSE(IsAlignedParam(base1, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(base2, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(base3, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(end3, 2 * page_size));
  } else {
    ASSERT_TRUE(IsAlignedParam(base1, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(base2, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(base3, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(end3, 2 * page_size));
  }

  // Align by 2 * page_size;
  m0->AlignBy(2 * page_size);
  m1->AlignBy(2 * page_size);
  m2->AlignBy(2 * page_size);
  m3->AlignBy(2 * page_size);

  // After AlignBy, every map's begin and end are 2-page aligned.
  EXPECT_TRUE(IsAlignedParam(m0->Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m1->Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m2->Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m3->Begin(), 2 * page_size));

  EXPECT_TRUE(IsAlignedParam(m0->Begin() + m0->Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m1->Begin() + m1->Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m2->Begin() + m2->Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m3->Begin() + m3->Size(), 2 * page_size));

  // Check exactly which page was trimmed from each map's begin/end.
  if (IsAlignedParam(base0, 2 * page_size)) {
    EXPECT_EQ(m0->Begin(), base0);
    EXPECT_EQ(m0->Begin() + m0->Size(), end0 - page_size);
    EXPECT_EQ(m1->Begin(), base1 + page_size);
    EXPECT_EQ(m1->Begin() + m1->Size(), end1 - page_size);
    EXPECT_EQ(m2->Begin(), base2 + page_size);
    EXPECT_EQ(m2->Begin() + m2->Size(), end2);
    EXPECT_EQ(m3->Begin(), base3);
    EXPECT_EQ(m3->Begin() + m3->Size(), end3);
  } else {
    EXPECT_EQ(m0->Begin(), base0 + page_size);
    EXPECT_EQ(m0->Begin() + m0->Size(), end0);
    EXPECT_EQ(m1->Begin(), base1);
    EXPECT_EQ(m1->Begin() + m1->Size(), end1);
    EXPECT_EQ(m2->Begin(), base2);
    EXPECT_EQ(m2->Begin() + m2->Size(), end2 - page_size);
    EXPECT_EQ(m3->Begin(), base3 + page_size);
    EXPECT_EQ(m3->Begin() + m3->Size(), end3 - page_size);
  }
}
539
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700540} // namespace art