/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include <memory>

#include "base/memory_tool.h"

#include "gtest/gtest.h"

namespace art {

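// Test fixture for the MemMap tests below. It reads MemMap's private state
// (base_begin_, base_size_ and, on some 64-bit targets, next_mem_pos_), so
// MemMap is expected to declare this class a friend in mem_map.h.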
class MemMapTest : public testing::Test {
 public:
  static uint8_t* BaseBegin(MemMap* mem_map) {
    return reinterpret_cast<uint8_t*>(mem_map->base_begin_);
  }
  static size_t BaseSize(MemMap* mem_map) {
    return mem_map->base_size_;
  }

  static void RemapAtEndTest(bool low_4gb) {
    std::string error_msg;
    // Cast the page size to size_t.
    const size_t page_size = static_cast<size_t>(kPageSize);
    // Map a two-page memory region.
    MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
                                      nullptr,
                                      2 * page_size,
                                      PROT_READ | PROT_WRITE,
                                      low_4gb,
                                      false,
                                      &error_msg);
    // Check its state and write to it.
    uint8_t* base0 = m0->Begin();
    ASSERT_TRUE(base0 != nullptr) << error_msg;
    size_t size0 = m0->Size();
    EXPECT_EQ(m0->Size(), 2 * page_size);
    EXPECT_EQ(BaseBegin(m0), base0);
    EXPECT_EQ(BaseSize(m0), size0);
    memset(base0, 42, 2 * page_size);
    // Remap the latter half into a second MemMap.
    MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
                                "MemMapTest_RemapAtEndTest_map1",
                                PROT_READ | PROT_WRITE,
                                &error_msg);
    // Check the states of the two maps.
    EXPECT_EQ(m0->Begin(), base0) << error_msg;
    EXPECT_EQ(m0->Size(), page_size);
    EXPECT_EQ(BaseBegin(m0), base0);
    EXPECT_EQ(BaseSize(m0), page_size);
    uint8_t* base1 = m1->Begin();
    size_t size1 = m1->Size();
    EXPECT_EQ(base1, base0 + page_size);
    EXPECT_EQ(size1, page_size);
    EXPECT_EQ(BaseBegin(m1), base1);
    EXPECT_EQ(BaseSize(m1), size1);
    // Write to the second region.
    memset(base1, 43, page_size);
    // Check the contents of the two regions.
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base0[i], 42);
    }
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base1[i], 43);
    }
    // Unmap the first region.
    delete m0;
    // Make sure the second region is still accessible after the first
    // region is unmapped.
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base1[i], 43);
    }
    delete m1;
  }

  void CommonInit() {
    MemMap::Init();
  }

#if defined(__LP64__) && !defined(__x86_64__)
  static uintptr_t GetLinearScanPos() {
    return MemMap::next_mem_pos_;
  }
#endif
};

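// These tests only run on 64-bit targets other than x86-64, where MemMap
// maintains a linear-scan position (next_mem_pos_) for anonymous mappings.
// The Start test checks its initial value and, on Bionic, that the start
// positions produced by CreateStartPos() vary and stay below ART_BASE_ADDRESS.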
#if defined(__LP64__) && !defined(__x86_64__)

#ifdef __BIONIC__
extern uintptr_t CreateStartPos(uint64_t input);
#endif

TEST_F(MemMapTest, Start) {
  CommonInit();
  uintptr_t start = GetLinearScanPos();
  EXPECT_LE(64 * KB, start);
  EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));
#ifdef __BIONIC__
  // Test a couple of values. Make sure they are different.
  uintptr_t last = 0;
  for (size_t i = 0; i < 100; ++i) {
    uintptr_t random_start = CreateStartPos(i * kPageSize);
    EXPECT_NE(last, random_start);
    last = random_start;
  }

  // Even on max, should be below ART_BASE_ADDRESS.
  EXPECT_LT(CreateStartPos(~0), static_cast<uintptr_t>(ART_BASE_ADDRESS));
#endif
  // End of test.
}
#endif

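// A zero-byte request should still return a usable (empty) MemMap, and a
// one-page request should succeed without setting an error message.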
TEST_F(MemMapTest, MapAnonymousEmpty) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
                                                   nullptr,
                                                   0,
                                                   PROT_READ,
                                                   false,
                                                   false,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  map.reset(MemMap::MapAnonymous("MapAnonymousEmpty",
                                 nullptr,
                                 kPageSize,
                                 PROT_READ | PROT_WRITE,
                                 false,
                                 false,
                                 &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
}

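// Same checks with low_4gb == true (the fifth MapAnonymous argument), which
// should place the mapping below the 4 GiB boundary; only meaningful on
// 64-bit builds.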
#ifdef __LP64__
TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
                                                   nullptr,
                                                   kPageSize,
                                                   PROT_READ | PROT_WRITE,
                                                   true,
                                                   false,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
}
#endif

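// Requesting a specific address should succeed while the range is free and
// fail once it collides with an existing mapping (unless reuse is requested;
// see MapAnonymousReuse below).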
TEST_F(MemMapTest, MapAnonymousExactAddr) {
  CommonInit();
  std::string error_msg;
  // Map at an address that should be available; this should succeed.
  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
                                                    reinterpret_cast<uint8_t*>(ART_BASE_ADDRESS),
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map0->BaseBegin() == reinterpret_cast<void*>(ART_BASE_ADDRESS));
  // Map at an unspecified address, which should succeed.
  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
                                                    nullptr,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map1->BaseBegin() != nullptr);
  // Attempt to map at the same address, which should fail.
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
                                                    reinterpret_cast<uint8_t*>(map1->BaseBegin()),
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map2.get() == nullptr) << error_msg;
  ASSERT_TRUE(!error_msg.empty());
}

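// Exercise the RemapAtEndTest() helper above in the default and low-4GB
// configurations.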
TEST_F(MemMapTest, RemapAtEnd) {
  RemapAtEndTest(false);
}

#ifdef __LP64__
TEST_F(MemMapTest, RemapAtEnd32bit) {
  RemapAtEndTest(true);
}
#endif

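// Map a large low-4GB region at an exact address above ART_BASE_ADDRESS; the
// mapping should land exactly where requested.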
TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
  CommonInit();
  // This test may not work when running under Valgrind or another memory tool.
  if (RUNNING_ON_MEMORY_TOOL == 0) {
    uintptr_t start_addr = ART_BASE_ADDRESS + 0x1000000;
    std::string error_msg;
    std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
                                                     reinterpret_cast<uint8_t*>(start_addr),
                                                     0x21000000,
                                                     PROT_READ | PROT_WRITE,
                                                     true,
                                                     false,
                                                     &error_msg));
    ASSERT_TRUE(map.get() != nullptr) << error_msg;
    ASSERT_TRUE(error_msg.empty());
    ASSERT_EQ(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), start_addr);
  }
}

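// A request whose end address would wrap around the top of the address space
// should fail cleanly with an error message.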
TEST_F(MemMapTest, MapAnonymousOverflow) {
  CommonInit();
  std::string error_msg;
  uintptr_t ptr = 0;
  ptr -= kPageSize;  // Now it's close to the top.
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
                                                   reinterpret_cast<uint8_t*>(ptr),
                                                   2 * kPageSize,  // Brings it over the top.
                                                   PROT_READ | PROT_WRITE,
                                                   false,
                                                   false,
                                                   &error_msg));
  ASSERT_EQ(nullptr, map.get());
  ASSERT_FALSE(error_msg.empty());
}

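// With low_4gb == true, requests whose expected address or whose end lies at
// or above the 4 GiB boundary must be rejected.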
#ifdef __LP64__
TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(
      MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
                           reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
                           kPageSize,
                           PROT_READ | PROT_WRITE,
                           true,
                           false,
                           &error_msg));
  ASSERT_EQ(nullptr, map.get());
  ASSERT_FALSE(error_msg.empty());
}

TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
                                                   reinterpret_cast<uint8_t*>(0xF0000000),
                                                   0x20000000,
                                                   PROT_READ | PROT_WRITE,
                                                   true,
                                                   false,
                                                   &error_msg));
  ASSERT_EQ(nullptr, map.get());
  ASSERT_FALSE(error_msg.empty());
}
#endif

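// Passing true for the reuse parameter (the sixth MapAnonymous argument)
// should allow mapping over an address range that is already reserved by an
// existing MemMap.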
TEST_F(MemMapTest, MapAnonymousReuse) {
  CommonInit();
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousReserve",
                                                   nullptr,
                                                   0x20000,
                                                   PROT_READ | PROT_WRITE,
                                                   false,
                                                   false,
                                                   &error_msg));
  ASSERT_NE(nullptr, map.get());
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymousReused",
                                                    reinterpret_cast<uint8_t*>(map->BaseBegin()),
                                                    0x10000,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    true,
                                                    &error_msg));
  ASSERT_NE(nullptr, map2.get());
  ASSERT_TRUE(error_msg.empty());
}

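// CheckNoGaps() should report true only when the given maps cover a
// contiguous address range with no hole between them.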
TEST_F(MemMapTest, CheckNoGaps) {
  CommonInit();
  std::string error_msg;
  constexpr size_t kNumPages = 3;
  // Map a 3-page mem map.
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymous0",
                                                   nullptr,
                                                   kPageSize * kNumPages,
                                                   PROT_READ | PROT_WRITE,
                                                   false,
                                                   false,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  // Record the base address.
  uint8_t* map_base = reinterpret_cast<uint8_t*>(map->BaseBegin());
  // Unmap it.
  map.reset();

  // Map at the same address, but in page-sized separate mem maps,
  // assuming the space at the address is still available.
  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
                                                    map_base,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
                                                    map_base + kPageSize,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
                                                    map_base + kPageSize * 2,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map2.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());

  // One-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map0.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map1.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map2.get(), map2.get()));

  // Two- and three-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map1.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map2.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map2.get()));

  // Unmap the middle one.
  map1.reset();

  // Should return false now that there's a gap in the middle.
  ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
}

}  // namespace art