/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16
17#include "mem_map.h"
18
Ian Rogers700a4022014-05-19 16:49:03 -070019#include <memory>
20
Brian Carlstrom9004cb62013-07-26 15:48:31 -070021#include "gtest/gtest.h"
22
23namespace art {
24
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070025class MemMapTest : public testing::Test {
26 public:
Ian Rogersef7d42f2014-01-06 12:55:46 -080027 static byte* BaseBegin(MemMap* mem_map) {
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070028 return reinterpret_cast<byte*>(mem_map->base_begin_);
29 }
Ian Rogersef7d42f2014-01-06 12:55:46 -080030 static size_t BaseSize(MemMap* mem_map) {
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070031 return mem_map->base_size_;
32 }
Ian Rogersef7d42f2014-01-06 12:55:46 -080033
34 static void RemapAtEndTest(bool low_4gb) {
35 std::string error_msg;
36 // Cast the page size to size_t.
37 const size_t page_size = static_cast<size_t>(kPageSize);
38 // Map a two-page memory region.
39 MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
40 nullptr,
41 2 * page_size,
42 PROT_READ | PROT_WRITE,
43 low_4gb,
44 &error_msg);
45 // Check its state and write to it.
46 byte* base0 = m0->Begin();
47 ASSERT_TRUE(base0 != nullptr) << error_msg;
48 size_t size0 = m0->Size();
49 EXPECT_EQ(m0->Size(), 2 * page_size);
50 EXPECT_EQ(BaseBegin(m0), base0);
51 EXPECT_EQ(BaseSize(m0), size0);
52 memset(base0, 42, 2 * page_size);
53 // Remap the latter half into a second MemMap.
54 MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
55 "MemMapTest_RemapAtEndTest_map1",
56 PROT_READ | PROT_WRITE,
57 &error_msg);
58 // Check the states of the two maps.
59 EXPECT_EQ(m0->Begin(), base0) << error_msg;
60 EXPECT_EQ(m0->Size(), page_size);
61 EXPECT_EQ(BaseBegin(m0), base0);
62 EXPECT_EQ(BaseSize(m0), page_size);
63 byte* base1 = m1->Begin();
64 size_t size1 = m1->Size();
65 EXPECT_EQ(base1, base0 + page_size);
66 EXPECT_EQ(size1, page_size);
67 EXPECT_EQ(BaseBegin(m1), base1);
68 EXPECT_EQ(BaseSize(m1), size1);
69 // Write to the second region.
70 memset(base1, 43, page_size);
71 // Check the contents of the two regions.
72 for (size_t i = 0; i < page_size; ++i) {
73 EXPECT_EQ(base0[i], 42);
74 }
75 for (size_t i = 0; i < page_size; ++i) {
76 EXPECT_EQ(base1[i], 43);
77 }
78 // Unmap the first region.
79 delete m0;
80 // Make sure the second region is still accessible after the first
81 // region is unmapped.
82 for (size_t i = 0; i < page_size; ++i) {
83 EXPECT_EQ(base1[i], 43);
84 }
85 delete m1;
86 }
Andreas Gamped8f26db2014-05-19 17:01:13 -070087
88#if defined(__LP64__) && !defined(__x86_64__)
89 static uintptr_t GetLinearScanPos() {
90 return MemMap::next_mem_pos_;
91 }
92#endif
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070093};
Brian Carlstrom9004cb62013-07-26 15:48:31 -070094
#if defined(__LP64__) && !defined(__x86_64__)

#ifdef __BIONIC__
// Defined in mem_map.cc (bionic builds only): derives a randomized linear-scan
// start position from |input|.
extern uintptr_t CreateStartPos(uint64_t input);
#endif

// Checks the initial linear-scan allocation position: at least 64KB (to avoid
// low memory) and below ART_BASE_ADDRESS.
TEST_F(MemMapTest, Start) {
  uintptr_t start = GetLinearScanPos();
  EXPECT_LE(64 * KB, start);
  EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));

#ifdef __BIONIC__
  // Test a couple of values. Make sure they are different.
  uintptr_t last = 0;
  for (size_t i = 0; i < 100; ++i) {
    uintptr_t random_start = CreateStartPos(i * kPageSize);
    EXPECT_NE(last, random_start);
    last = random_start;
  }

  // Even on max, should be below ART_BASE_ADDRESS.
  EXPECT_LT(CreateStartPos(~0), static_cast<uintptr_t>(ART_BASE_ADDRESS));
#endif
  // End of test.
}
#endif
121
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700122TEST_F(MemMapTest, MapAnonymousEmpty) {
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700123 std::string error_msg;
Ian Rogers700a4022014-05-19 16:49:03 -0700124 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
Ian Rogersef7d42f2014-01-06 12:55:46 -0800125 nullptr,
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700126 0,
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700127 PROT_READ,
Ian Rogersef7d42f2014-01-06 12:55:46 -0800128 false,
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700129 &error_msg));
Ian Rogersef7d42f2014-01-06 12:55:46 -0800130 ASSERT_TRUE(map.get() != nullptr) << error_msg;
131 ASSERT_TRUE(error_msg.empty());
132 map.reset(MemMap::MapAnonymous("MapAnonymousEmpty",
133 nullptr,
134 kPageSize,
135 PROT_READ | PROT_WRITE,
136 false,
137 &error_msg));
138 ASSERT_TRUE(map.get() != nullptr) << error_msg;
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700139 ASSERT_TRUE(error_msg.empty());
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700140}
141
Ian Rogersef7d42f2014-01-06 12:55:46 -0800142#ifdef __LP64__
143TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700144 std::string error_msg;
Ian Rogers700a4022014-05-19 16:49:03 -0700145 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
Ian Rogersef7d42f2014-01-06 12:55:46 -0800146 nullptr,
147 kPageSize,
148 PROT_READ | PROT_WRITE,
149 true,
150 &error_msg));
151 ASSERT_TRUE(map.get() != nullptr) << error_msg;
152 ASSERT_TRUE(error_msg.empty());
153 ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700154}
Ian Rogersef7d42f2014-01-06 12:55:46 -0800155#endif
156
// Verifies exact-address semantics of MapAnonymous: a free address succeeds,
// an unspecified (nullptr) address succeeds, and a request for an
// already-occupied address fails with a non-empty error message.
TEST_F(MemMapTest, MapAnonymousExactAddr) {
  std::string error_msg;
  // Map at an address that should work, which should succeed.
  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
                                                    reinterpret_cast<byte*>(ART_BASE_ADDRESS),
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map0->BaseBegin() == reinterpret_cast<void*>(ART_BASE_ADDRESS));
  // Map at an unspecified address, which should succeed.
  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
                                                    nullptr,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map1->BaseBegin() != nullptr);
  // Attempt to map at the same address as map1 (still live), which should fail.
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
                                                    reinterpret_cast<byte*>(map1->BaseBegin()),
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map2.get() == nullptr) << error_msg;
  ASSERT_TRUE(!error_msg.empty());
}
189
// Exercises RemapAtEnd() with low_4gb == false (no address restriction).
TEST_F(MemMapTest, RemapAtEnd) {
  RemapAtEndTest(false);
}
193
#ifdef __LP64__
// Exercises RemapAtEnd() with low_4gb == true (64-bit builds only).
TEST_F(MemMapTest, RemapAtEnd32bit) {
  RemapAtEndTest(true);
}
#endif
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700199
// Requests a large low-4GB mapping at an exact address above ART_BASE_ADDRESS
// and checks that it is honored. NOTE(review): the 0x21000000 size appears
// chosen to exercise a large allocation in the low-4GB scan path — confirm
// against mem_map.cc before changing it.
TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
  uintptr_t start_addr = ART_BASE_ADDRESS + 0x1000000;
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
                                                   reinterpret_cast<byte*>(start_addr),
                                                   0x21000000,
                                                   PROT_READ | PROT_WRITE,
                                                   true,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), start_addr);
}
213
214TEST_F(MemMapTest, MapAnonymousOverflow) {
215 std::string error_msg;
216 uintptr_t ptr = 0;
217 ptr -= kPageSize; // Now it's close to the top.
Ian Rogers700a4022014-05-19 16:49:03 -0700218 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
Qiming Shi84d49cc2014-04-24 15:38:41 +0800219 reinterpret_cast<byte*>(ptr),
220 2 * kPageSize, // brings it over the top.
221 PROT_READ | PROT_WRITE,
222 false,
223 &error_msg));
224 ASSERT_EQ(nullptr, map.get());
225 ASSERT_FALSE(error_msg.empty());
226}
227
228#ifdef __LP64__
229TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
230 std::string error_msg;
Ian Rogers700a4022014-05-19 16:49:03 -0700231 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
Qiming Shi84d49cc2014-04-24 15:38:41 +0800232 reinterpret_cast<byte*>(UINT64_C(0x100000000)),
233 kPageSize,
234 PROT_READ | PROT_WRITE,
235 true,
236 &error_msg));
237 ASSERT_EQ(nullptr, map.get());
238 ASSERT_FALSE(error_msg.empty());
239}
240
241TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
242 std::string error_msg;
Ian Rogers700a4022014-05-19 16:49:03 -0700243 std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
Qiming Shi84d49cc2014-04-24 15:38:41 +0800244 reinterpret_cast<byte*>(0xF0000000),
245 0x20000000,
246 PROT_READ | PROT_WRITE,
247 true,
248 &error_msg));
249 ASSERT_EQ(nullptr, map.get());
250 ASSERT_FALSE(error_msg.empty());
251}
252#endif
253
// Verifies MemMap::CheckNoGaps(): three adjacent page-sized maps form a
// gap-free range, and unmapping the middle one makes CheckNoGaps fail.
TEST_F(MemMapTest, CheckNoGaps) {
  std::string error_msg;
  constexpr size_t kNumPages = 3;
  // Map a 3-page mem map.
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymous0",
                                                   nullptr,
                                                   kPageSize * kNumPages,
                                                   PROT_READ | PROT_WRITE,
                                                   false,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  // Record the base address.
  byte* map_base = reinterpret_cast<byte*>(map->BaseBegin());
  // Unmap it.
  map.reset();

  // Map at the same address, but in page-sized separate mem maps,
  // assuming the space at the address is still available (another thread
  // could theoretically grab it in between — this test relies on that
  // not happening).
  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
                                                    map_base,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
                                                    map_base + kPageSize,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
                                                    map_base + kPageSize * 2,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map2.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());

  // One-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map0.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map1.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map2.get(), map2.get()));

  // Two or three-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map1.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map2.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map2.get()));

  // Unmap the middle one.
  map1.reset();

  // Should return false now that there's a gap in the middle.
  ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
}
314
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700315} // namespace art