/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "mem_map.h"

#include <stdint.h>
#include <string.h>
#include <sys/mman.h>

#include <memory>
#include <string>

#include "gtest/gtest.h"

namespace art {

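// Fixture that exposes MemMap's base_begin_/base_size_ through static
// accessors and provides the remap-at-end scenario shared by two tests below.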
class MemMapTest : public testing::Test {
 public:
  static byte* BaseBegin(MemMap* mem_map) {
    return reinterpret_cast<byte*>(mem_map->base_begin_);
  }
  static size_t BaseSize(MemMap* mem_map) {
    return mem_map->base_size_;
  }

  static void RemapAtEndTest(bool low_4gb) {
    std::string error_msg;
    // Cast the page size to size_t.
    const size_t page_size = static_cast<size_t>(kPageSize);
    // Map a two-page memory region.
    MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
                                      nullptr,
                                      2 * page_size,
                                      PROT_READ | PROT_WRITE,
                                      low_4gb,
                                      &error_msg);
    // Check that the map was created before touching it, then check its state
    // and write to it.
    ASSERT_TRUE(m0 != nullptr) << error_msg;
    byte* base0 = m0->Begin();
    ASSERT_TRUE(base0 != nullptr) << error_msg;
    size_t size0 = m0->Size();
    EXPECT_EQ(m0->Size(), 2 * page_size);
    EXPECT_EQ(BaseBegin(m0), base0);
    EXPECT_EQ(BaseSize(m0), size0);
    memset(base0, 42, 2 * page_size);
    // Remap the latter half into a second MemMap.
    MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
                                "MemMapTest_RemapAtEndTest_map1",
                                PROT_READ | PROT_WRITE,
                                &error_msg);
    // The remap should have succeeded; check the states of the two maps.
    ASSERT_TRUE(m1 != nullptr) << error_msg;
    EXPECT_EQ(m0->Begin(), base0) << error_msg;
    EXPECT_EQ(m0->Size(), page_size);
    EXPECT_EQ(BaseBegin(m0), base0);
    EXPECT_EQ(BaseSize(m0), page_size);
    byte* base1 = m1->Begin();
    size_t size1 = m1->Size();
    EXPECT_EQ(base1, base0 + page_size);
    EXPECT_EQ(size1, page_size);
    EXPECT_EQ(BaseBegin(m1), base1);
    EXPECT_EQ(BaseSize(m1), size1);
    // Write to the second region.
    memset(base1, 43, page_size);
    // Check the contents of the two regions.
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base0[i], 42);
    }
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base1[i], 43);
    }
    // Unmap the first region.
    delete m0;
    // Make sure the second region is still accessible after the first
    // region is unmapped.
    for (size_t i = 0; i < page_size; ++i) {
      EXPECT_EQ(base1[i], 43);
    }
    delete m1;
  }

#if defined(__LP64__) && !defined(__x86_64__)
  // Expose the start position of the linear scan MemMap uses when looking for
  // a free low-memory region on these targets.
  static uintptr_t GetLinearScanPos() {
    return MemMap::next_mem_pos_;
  }
#endif
};

#if defined(__LP64__) && !defined(__x86_64__)

#ifdef __BIONIC__
extern uintptr_t CreateStartPos(uint64_t input);
#endif

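// The linear scan should start between 64 KB and ART_BASE_ADDRESS; on Bionic,
// successive CreateStartPos() results should differ and even the maximum
// input should stay below ART_BASE_ADDRESS.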
TEST_F(MemMapTest, Start) {
  uintptr_t start = GetLinearScanPos();
  EXPECT_LE(64 * KB, start);
  EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));

#ifdef __BIONIC__
  // Test a number of values; successive results should differ.
  uintptr_t last = 0;
  for (size_t i = 0; i < 100; ++i) {
    uintptr_t random_start = CreateStartPos(i * kPageSize);
    EXPECT_NE(last, random_start);
    last = random_start;
  }

  // Even on the maximum input, the result should be below ART_BASE_ADDRESS.
  EXPECT_LT(CreateStartPos(~0), static_cast<uintptr_t>(ART_BASE_ADDRESS));
#endif
  // End of test.
}
#endif

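// Mapping a zero-byte anonymous region should succeed, as should a normal
// page-sized mapping afterwards.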
TEST_F(MemMapTest, MapAnonymousEmpty) {
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
                                                   nullptr,
                                                   0,
                                                   PROT_READ,
                                                   false,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  map.reset(MemMap::MapAnonymous("MapAnonymousEmpty",
                                 nullptr,
                                 kPageSize,
                                 PROT_READ | PROT_WRITE,
                                 false,
                                 &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
}

#ifdef __LP64__
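// With low_4gb requested, the mapping should be placed below the 4 GiB mark.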
TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
                                                   nullptr,
                                                   kPageSize,
                                                   PROT_READ | PROT_WRITE,
                                                   true,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
}
#endif

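// Mapping at an explicit free address should succeed, mapping at an
// unspecified address should succeed, and mapping at an already-occupied
// address should fail.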
TEST_F(MemMapTest, MapAnonymousExactAddr) {
  std::string error_msg;
  // Map at an address that should be available, which should succeed.
  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
                                                    reinterpret_cast<byte*>(ART_BASE_ADDRESS),
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map0->BaseBegin() == reinterpret_cast<void*>(ART_BASE_ADDRESS));
  // Map at an unspecified address, which should succeed.
  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
                                                    nullptr,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map1->BaseBegin() != nullptr);
  // Attempt to map at the same address, which should fail.
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
                                                    reinterpret_cast<byte*>(map1->BaseBegin()),
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map2.get() == nullptr) << error_msg;
  ASSERT_TRUE(!error_msg.empty());
}

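// Run the remap-at-end scenario without (and, on 64-bit targets, with) the
// low-4GB restriction.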
TEST_F(MemMapTest, RemapAtEnd) {
  RemapAtEndTest(false);
}

#ifdef __LP64__
TEST_F(MemMapTest, RemapAtEnd32bit) {
  RemapAtEndTest(true);
}
#endif

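// A large low-4GB mapping requested at a high 32-bit address should still be
// placed at exactly the requested address.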
TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
                                                   reinterpret_cast<byte*>(0x71000000),
                                                   0x21000000,
                                                   PROT_READ | PROT_WRITE,
                                                   true,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 0x71000000U);
}

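// A requested range that wraps past the top of the address space must fail
// and produce an error message.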
TEST_F(MemMapTest, MapAnonymousOverflow) {
  std::string error_msg;
  uintptr_t ptr = 0;
  ptr -= kPageSize;  // Now it's close to the top.
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
                                                   reinterpret_cast<byte*>(ptr),
                                                   2 * kPageSize,  // Brings it over the top.
                                                   PROT_READ | PROT_WRITE,
                                                   false,
                                                   &error_msg));
  ASSERT_EQ(nullptr, map.get());
  ASSERT_FALSE(error_msg.empty());
}

#ifdef __LP64__
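// A low-4GB request whose expected address is not below 4 GiB must fail.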
TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
                                                   reinterpret_cast<byte*>(UINT64_C(0x100000000)),
                                                   kPageSize,
                                                   PROT_READ | PROT_WRITE,
                                                   true,
                                                   &error_msg));
  ASSERT_EQ(nullptr, map.get());
  ASSERT_FALSE(error_msg.empty());
}

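// A low-4GB request whose range would extend past the 4 GiB boundary must fail.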
TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
  std::string error_msg;
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
                                                   reinterpret_cast<byte*>(0xF0000000),
                                                   0x20000000,
                                                   PROT_READ | PROT_WRITE,
                                                   true,
                                                   &error_msg));
  ASSERT_EQ(nullptr, map.get());
  ASSERT_FALSE(error_msg.empty());
}
#endif

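// CheckNoGaps() should report adjacent page-sized mappings as gap-free and
// detect the hole left after the middle mapping is released.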
TEST_F(MemMapTest, CheckNoGaps) {
  std::string error_msg;
  constexpr size_t kNumPages = 3;
  // Map a 3-page mem map.
  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymous0",
                                                   nullptr,
                                                   kPageSize * kNumPages,
                                                   PROT_READ | PROT_WRITE,
                                                   false,
                                                   &error_msg));
  ASSERT_TRUE(map.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  // Record the base address.
  byte* map_base = reinterpret_cast<byte*>(map->BaseBegin());
  // Unmap it.
  map.reset();

  // Map at the same address, but in page-sized separate mem maps,
  // assuming the space at the address is still available.
  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
                                                    map_base,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
                                                    map_base + kPageSize,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
                                                    map_base + kPageSize * 2,
                                                    kPageSize,
                                                    PROT_READ | PROT_WRITE,
                                                    false,
                                                    &error_msg));
  ASSERT_TRUE(map2.get() != nullptr) << error_msg;
  ASSERT_TRUE(error_msg.empty());

  // One-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map0.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map1.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map2.get(), map2.get()));

  // Two- and three-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map1.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map2.get()));
  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map2.get()));

  // Unmap the middle one.
  map1.reset();

  // CheckNoGaps should return false now that there's a gap in the middle.
  ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
}

}  // namespace art