blob: b2f5c728e463017761d12578772c79ab3e1fa5eb [file] [log] [blame]
Brian Carlstrom9004cb62013-07-26 15:48:31 -07001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "mem_map.h"
18
Andreas Gamped4901292017-05-30 18:41:34 -070019#include <sys/mman.h>
20
Ian Rogers700a4022014-05-19 16:49:03 -070021#include <memory>
Alex Lightca97ada2018-02-02 09:25:31 -080022#include <random>
Ian Rogers700a4022014-05-19 16:49:03 -070023
David Sehrd5f8de82018-04-27 14:12:03 -070024#include "base/common_art_test.h"
25#include "common_runtime_test.h" // For TEST_DISABLED_FOR_MIPS
David Sehr1979c642018-04-26 14:41:18 -070026#include "memory_tool.h"
27#include "unix_file/fd_file.h"
Brian Carlstrom9004cb62013-07-26 15:48:31 -070028
29namespace art {
30
David Sehrd5f8de82018-04-27 14:12:03 -070031class MemMapTest : public CommonArtTest {
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070032 public:
Alex Lightca97ada2018-02-02 09:25:31 -080033 static bool IsAddressMapped(void* addr) {
34 bool res = msync(addr, 1, MS_SYNC) == 0;
35 if (!res && errno != ENOMEM) {
36 PLOG(FATAL) << "Unexpected error occurred on msync";
37 }
38 return res;
39 }
40
41 static std::vector<uint8_t> RandomData(size_t size) {
42 std::random_device rd;
43 std::uniform_int_distribution<uint8_t> dist;
44 std::vector<uint8_t> res;
45 res.resize(size);
46 for (size_t i = 0; i < size; i++) {
47 res[i] = dist(rd);
48 }
49 return res;
50 }
51
Mathieu Chartier16d29f82015-11-10 10:32:52 -080052 static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
53 // Find a valid map address and unmap it before returning.
54 std::string error_msg;
Vladimir Markoc34bebf2018-08-16 16:12:49 +010055 MemMap map = MemMap::MapAnonymous("temp",
56 /* addr */ nullptr,
57 size,
58 PROT_READ,
59 low_4gb,
60 /* reuse */ false,
61 &error_msg);
62 CHECK(map.IsValid());
63 return map.Begin();
Mathieu Chartier16d29f82015-11-10 10:32:52 -080064 }
65
Ian Rogersef7d42f2014-01-06 12:55:46 -080066 static void RemapAtEndTest(bool low_4gb) {
67 std::string error_msg;
68 // Cast the page size to size_t.
69 const size_t page_size = static_cast<size_t>(kPageSize);
70 // Map a two-page memory region.
Vladimir Markoc34bebf2018-08-16 16:12:49 +010071 MemMap m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
72 /* addr */ nullptr,
73 2 * page_size,
74 PROT_READ | PROT_WRITE,
75 low_4gb,
76 /* reuse */ false,
77 &error_msg);
Ian Rogersef7d42f2014-01-06 12:55:46 -080078 // Check its state and write to it.
Vladimir Markoc34bebf2018-08-16 16:12:49 +010079 ASSERT_TRUE(m0.IsValid());
80 uint8_t* base0 = m0.Begin();
Ian Rogersef7d42f2014-01-06 12:55:46 -080081 ASSERT_TRUE(base0 != nullptr) << error_msg;
Vladimir Markoc34bebf2018-08-16 16:12:49 +010082 size_t size0 = m0.Size();
83 EXPECT_EQ(m0.Size(), 2 * page_size);
84 EXPECT_EQ(m0.BaseBegin(), base0);
85 EXPECT_EQ(m0.BaseSize(), size0);
Ian Rogersef7d42f2014-01-06 12:55:46 -080086 memset(base0, 42, 2 * page_size);
87 // Remap the latter half into a second MemMap.
Vladimir Markoc34bebf2018-08-16 16:12:49 +010088 MemMap m1 = m0.RemapAtEnd(base0 + page_size,
89 "MemMapTest_RemapAtEndTest_map1",
90 PROT_READ | PROT_WRITE,
91 &error_msg);
Ian Rogersef7d42f2014-01-06 12:55:46 -080092 // Check the states of the two maps.
Vladimir Markoc34bebf2018-08-16 16:12:49 +010093 EXPECT_EQ(m0.Begin(), base0) << error_msg;
94 EXPECT_EQ(m0.Size(), page_size);
95 EXPECT_EQ(m0.BaseBegin(), base0);
96 EXPECT_EQ(m0.BaseSize(), page_size);
97 uint8_t* base1 = m1.Begin();
98 size_t size1 = m1.Size();
Ian Rogersef7d42f2014-01-06 12:55:46 -080099 EXPECT_EQ(base1, base0 + page_size);
100 EXPECT_EQ(size1, page_size);
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100101 EXPECT_EQ(m1.BaseBegin(), base1);
102 EXPECT_EQ(m1.BaseSize(), size1);
Ian Rogersef7d42f2014-01-06 12:55:46 -0800103 // Write to the second region.
104 memset(base1, 43, page_size);
105 // Check the contents of the two regions.
106 for (size_t i = 0; i < page_size; ++i) {
107 EXPECT_EQ(base0[i], 42);
108 }
109 for (size_t i = 0; i < page_size; ++i) {
110 EXPECT_EQ(base1[i], 43);
111 }
112 // Unmap the first region.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100113 m0.Reset();
Ian Rogersef7d42f2014-01-06 12:55:46 -0800114 // Make sure the second region is still accessible after the first
115 // region is unmapped.
116 for (size_t i = 0; i < page_size; ++i) {
117 EXPECT_EQ(base1[i], 43);
118 }
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100119 MemMap m2 = m1.RemapAtEnd(m1.Begin(),
120 "MemMapTest_RemapAtEndTest_map1",
121 PROT_READ | PROT_WRITE,
122 &error_msg);
123 ASSERT_TRUE(m2.IsValid()) << error_msg;
124 ASSERT_FALSE(m1.IsValid());
Ian Rogersef7d42f2014-01-06 12:55:46 -0800125 }
Andreas Gamped8f26db2014-05-19 17:01:13 -0700126
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700127 void CommonInit() {
128 MemMap::Init();
129 }
130
Andreas Gamped8f26db2014-05-19 17:01:13 -0700131#if defined(__LP64__) && !defined(__x86_64__)
132 static uintptr_t GetLinearScanPos() {
133 return MemMap::next_mem_pos_;
134 }
135#endif
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700136};
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700137
Andreas Gamped8f26db2014-05-19 17:01:13 -0700138#if defined(__LP64__) && !defined(__x86_64__)
139
140#ifdef __BIONIC__
141extern uintptr_t CreateStartPos(uint64_t input);
142#endif
143
// Checks MemMap's linear-scan start position on 64-bit non-x86_64 targets:
// it must lie between 64KB and ART_BASE_ADDRESS, and (on bionic) the
// randomized start positions produced by CreateStartPos must vary with input.
TEST_F(MemMapTest, Start) {
  CommonInit();
  uintptr_t start = GetLinearScanPos();
  EXPECT_LE(64 * KB, start);
  EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));
#ifdef __BIONIC__
  // Test a couple of values. Make sure they are different.
  uintptr_t last = 0;
  for (size_t i = 0; i < 100; ++i) {
    uintptr_t random_start = CreateStartPos(i * kPageSize);
    EXPECT_NE(last, random_start);
    last = random_start;
  }

  // Even on max, should be below ART_BASE_ADDRESS.
  EXPECT_LT(CreateStartPos(~0), static_cast<uintptr_t>(ART_BASE_ADDRESS));
#endif
  // End of test.
}
163#endif
164
Alex Lightca97ada2018-02-02 09:25:31 -0800165// We need mremap to be able to test ReplaceMapping at all
166#if HAVE_MREMAP_SYSCALL
// Checks ReplaceWith for equal-sized mappings: the source region is moved
// over the destination, the source address becomes unmapped, the source
// MemMap is invalidated, and the destination takes the source's contents.
TEST_F(MemMapTest, ReplaceMapping_SameSize) {
  std::string error_msg;
  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                     /* addr */ nullptr,
                                     kPageSize,
                                     PROT_READ,
                                     /* low_4gb */ false,
                                     /* reuse */ false,
                                     &error_msg);
  ASSERT_TRUE(dest.IsValid());
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       /* addr */ nullptr,
                                       kPageSize,
                                       PROT_WRITE | PROT_READ,
                                       /* low_4gb */ false,
                                       /* reuse */ false,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  void* source_addr = source.Begin();
  void* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));

  // Fingerprint the source so we can verify the contents moved.
  std::vector<uint8_t> data = RandomData(kPageSize);
  memcpy(source.Begin(), data.data(), data.size());

  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  // Source region is gone; dest is still mapped and source is invalidated.
  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(source.IsValid());

  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));

  // Dest must now hold the source's bytes.
  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}
203
// Checks ReplaceWith when the source is larger than the destination: the
// destination grows to the source's size and takes over its contents.
TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
  std::string error_msg;
  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                     /* addr */ nullptr,
                                     5 * kPageSize,  // Need to make it larger
                                                     // initially so we know
                                                     // there won't be mappings
                                                     // in the way when we move
                                                     // source.
                                     PROT_READ,
                                     /* low_4gb */ false,
                                     /* reuse */ false,
                                     &error_msg);
  ASSERT_TRUE(dest.IsValid());
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       /* addr */ nullptr,
                                       3 * kPageSize,
                                       PROT_WRITE | PROT_READ,
                                       /* low_4gb */ false,
                                       /* reuse */ false,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  uint8_t* source_addr = source.Begin();
  uint8_t* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));

  // Fill the source with random data.
  std::vector<uint8_t> data = RandomData(3 * kPageSize);
  memcpy(source.Begin(), data.data(), data.size());

  // Make the dest smaller so that we know we'll have space.
  dest.SetSize(kPageSize);

  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));

  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  // Source region is gone and dest now covers the source's three pages.
  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * kPageSize));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_FALSE(source.IsValid());

  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}
251
// Checks ReplaceWith when the source is smaller than the destination: the
// destination shrinks to the source's size and its trailing pages are
// unmapped.
TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
  std::string error_msg;
  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                     /* addr */ nullptr,
                                     3 * kPageSize,
                                     PROT_READ,
                                     /* low_4gb */ false,
                                     /* reuse */ false,
                                     &error_msg);
  ASSERT_TRUE(dest.IsValid());
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       /* addr */ nullptr,
                                       kPageSize,
                                       PROT_WRITE | PROT_READ,
                                       /* low_4gb */ false,
                                       /* reuse */ false,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  uint8_t* source_addr = source.Begin();
  uint8_t* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * kPageSize));

  // Fingerprint the source page.
  std::vector<uint8_t> data = RandomData(kPageSize);
  memcpy(source.Begin(), data.data(), kPageSize);

  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  // Dest shrank to one page; its former trailing pages must be unmapped.
  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_FALSE(source.IsValid());

  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}
290
// Checks that ReplaceWith fails when the source overlaps the destination,
// and that on failure both mappings and their contents are left untouched.
TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
  std::string error_msg;
  MemMap dest =
      MemMap::MapAnonymous(
          "MapAnonymousEmpty-atomic-replace-dest",
          /* addr */ nullptr,
          3 * kPageSize,  // Need to make it larger initially so we know there won't be mappings in
                          // the way when we move source.
          PROT_READ | PROT_WRITE,
          /* low_4gb */ false,
          /* reuse */ false,
          &error_msg);
  ASSERT_TRUE(dest.IsValid());
  // Resize down to 1 page so we can remap the rest.
  dest.SetSize(kPageSize);
  // Create source from the last 2 pages
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       dest.Begin() + kPageSize,
                                       2 * kPageSize,
                                       PROT_WRITE | PROT_READ,
                                       /* low_4gb */ false,
                                       /* reuse */ false,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  ASSERT_EQ(dest.Begin() + kPageSize, source.Begin());
  uint8_t* source_addr = source.Begin();
  uint8_t* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));

  // Fill the source and dest with random data.
  std::vector<uint8_t> data = RandomData(2 * kPageSize);
  memcpy(source.Begin(), data.data(), data.size());
  std::vector<uint8_t> dest_data = RandomData(kPageSize);
  memcpy(dest.Begin(), dest_data.data(), dest_data.size());

  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));

  // The overlapping replace must fail...
  ASSERT_FALSE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  // ...and leave both mappings valid with their original contents.
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_EQ(source.Size(), data.size());
  ASSERT_EQ(dest.Size(), dest_data.size());

  ASSERT_EQ(memcmp(source.Begin(), data.data(), data.size()), 0);
  ASSERT_EQ(memcmp(dest.Begin(), dest_data.data(), dest_data.size()), 0);
}
339#endif // HAVE_MREMAP_SYSCALL
340
// Checks that a zero-byte anonymous mapping fails with an error message,
// while a normal one-page mapping succeeds without one.
TEST_F(MemMapTest, MapAnonymousEmpty) {
  CommonInit();
  std::string error_msg;
  // A zero-sized request must fail and populate error_msg.
  MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
                                    /* addr */ nullptr,
                                    0,
                                    PROT_READ,
                                    /* low_4gb */ false,
                                    /* reuse */ false,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid()) << error_msg;
  ASSERT_FALSE(error_msg.empty());

  error_msg.clear();
  // A one-page request must succeed and leave error_msg empty.
  map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
                             /* addr */ nullptr,
                             kPageSize,
                             PROT_READ | PROT_WRITE,
                             /* low_4gb */ false,
                             /* reuse */ false,
                             &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
}
365
// Checks that a failing MapAnonymous call tolerates a null error-message
// pointer instead of crashing while trying to report the failure.
TEST_F(MemMapTest, MapAnonymousFailNullError) {
  CommonInit();
  // Test that we don't crash with a null error_str when mapping at an invalid location.
  MemMap map = MemMap::MapAnonymous("MapAnonymousInvalid",
                                    reinterpret_cast<uint8_t*>(kPageSize),
                                    0x20000,
                                    PROT_READ | PROT_WRITE,
                                    /* low_4gb */ false,
                                    /* reuse */ false,
                                    nullptr);
  ASSERT_FALSE(map.IsValid());
}
378
Ian Rogersef7d42f2014-01-06 12:55:46 -0800379#ifdef __LP64__
// Same as MapAnonymousEmpty but with low_4gb = true; additionally checks
// that the successful mapping is placed below the 4GB boundary.
TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
  CommonInit();
  std::string error_msg;
  // A zero-sized request must fail and populate error_msg.
  MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
                                    /* addr */ nullptr,
                                    0,
                                    PROT_READ,
                                    /* low_4gb */ true,
                                    /* reuse */ false,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid()) << error_msg;
  ASSERT_FALSE(error_msg.empty());

  error_msg.clear();
  map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
                             /* addr */ nullptr,
                             kPageSize,
                             PROT_READ | PROT_WRITE,
                             /* low_4gb */ true,
                             /* reuse */ false,
                             &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  // low_4gb mappings must land below 4GB.
  ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
}
// Checks that MapFile with low_4gb = true maps a file-backed region below
// the 4GB boundary with the requested size.
TEST_F(MemMapTest, MapFile32Bit) {
  CommonInit();
  std::string error_msg;
  ScratchFile scratch_file;
  constexpr size_t kMapSize = kPageSize;
  // Write one page of zeros so the file is large enough to map.
  std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
  MemMap map = MemMap::MapFile(/*byte_count*/kMapSize,
                               PROT_READ,
                               MAP_PRIVATE,
                               scratch_file.GetFd(),
                               /*start*/0,
                               /*low_4gb*/true,
                               scratch_file.GetFilename().c_str(),
                               &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map.Size(), kMapSize);
  ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
}
Ian Rogersef7d42f2014-01-06 12:55:46 -0800425#endif
426
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700427TEST_F(MemMapTest, MapAnonymousExactAddr) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700428 CommonInit();
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700429 std::string error_msg;
Mathieu Chartier16d29f82015-11-10 10:32:52 -0800430 // Find a valid address.
431 uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb*/false);
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700432 // Map at an address that should work, which should succeed.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100433 MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
434 valid_address,
435 kPageSize,
436 PROT_READ | PROT_WRITE,
437 /* low_4gb */ false,
438 /* reuse */ false,
439 &error_msg);
440 ASSERT_TRUE(map0.IsValid()) << error_msg;
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700441 ASSERT_TRUE(error_msg.empty());
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100442 ASSERT_TRUE(map0.BaseBegin() == valid_address);
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700443 // Map at an unspecified address, which should succeed.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100444 MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
445 /* addr */ nullptr,
446 kPageSize,
447 PROT_READ | PROT_WRITE,
448 /* low_4gb */ false,
449 /* reuse */ false,
450 &error_msg);
451 ASSERT_TRUE(map1.IsValid()) << error_msg;
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700452 ASSERT_TRUE(error_msg.empty());
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100453 ASSERT_TRUE(map1.BaseBegin() != nullptr);
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700454 // Attempt to map at the same address, which should fail.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100455 MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
456 reinterpret_cast<uint8_t*>(map1.BaseBegin()),
457 kPageSize,
458 PROT_READ | PROT_WRITE,
459 /* low_4gb */ false,
460 /* reuse */ false,
461 &error_msg);
462 ASSERT_FALSE(map2.IsValid()) << error_msg;
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700463 ASSERT_TRUE(!error_msg.empty());
464}
465
// Exercises RemapAtEndTest with an unrestricted (anywhere) mapping.
TEST_F(MemMapTest, RemapAtEnd) {
  RemapAtEndTest(false);
}
469
470#ifdef __LP64__
// Exercises RemapAtEndTest with a low-4GB mapping (64-bit builds only).
TEST_F(MemMapTest, RemapAtEnd32bit) {
  RemapAtEndTest(true);
}
474#endif
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700475
// Checks that an exact-address low_4gb mapping can be obtained somewhere in
// the 2GB-4GB range by scanning candidate start addresses until one works.
TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
  // Some MIPS32 hardware (namely the Creator Ci20 development board)
  // cannot allocate in the 2GB-4GB region.
  TEST_DISABLED_FOR_MIPS();

  // This test does not work under AddressSanitizer.
  // Historical note: This test did not work under Valgrind either.
  TEST_DISABLED_FOR_MEMORY_TOOL();

  CommonInit();
  constexpr size_t size = 0x100000;
  // Try all addresses starting from 2GB to 4GB.
  size_t start_addr = 2 * GB;
  std::string error_msg;
  MemMap map;
  for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
    map = MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
                               reinterpret_cast<uint8_t*>(start_addr),
                               size,
                               PROT_READ | PROT_WRITE,
                               /*low_4gb*/ true,
                               /* reuse */ false,
                               &error_msg);
    if (map.IsValid()) {
      break;
    }
  }
  // At least one candidate address must have succeeded, above 2GB and at
  // exactly the requested start address.
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_GE(reinterpret_cast<uintptr_t>(map.End()), 2u * GB);
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map.BaseBegin(), reinterpret_cast<void*>(start_addr));
}
508
// Checks that a request whose address range would wrap past the top of the
// address space is rejected with an error.
TEST_F(MemMapTest, MapAnonymousOverflow) {
  CommonInit();
  std::string error_msg;
  uintptr_t ptr = 0;
  ptr -= kPageSize;  // Now it's close to the top.
  MemMap map = MemMap::MapAnonymous("MapAnonymousOverflow",
                                    reinterpret_cast<uint8_t*>(ptr),
                                    2 * kPageSize,  // brings it over the top.
                                    PROT_READ | PROT_WRITE,
                                    /* low_4gb */ false,
                                    /* reuse */ false,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid());
  ASSERT_FALSE(error_msg.empty());
}
524
525#ifdef __LP64__
// Checks that a low_4gb request with an explicit address at the 4GB boundary
// is rejected.
TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
  CommonInit();
  std::string error_msg;
  MemMap map =
      MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
                           reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
                           kPageSize,
                           PROT_READ | PROT_WRITE,
                           /* low_4gb */ true,
                           /* reuse */ false,
                           &error_msg);
  ASSERT_FALSE(map.IsValid());
  ASSERT_FALSE(error_msg.empty());
}
540
// Checks that a low_4gb request whose range starts below 4GB but would end
// above it is rejected.
TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
  CommonInit();
  std::string error_msg;
  MemMap map = MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
                                    reinterpret_cast<uint8_t*>(0xF0000000),
                                    0x20000000,
                                    PROT_READ | PROT_WRITE,
                                    /* low_4gb */ true,
                                    /* reuse */ false,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid());
  ASSERT_FALSE(error_msg.empty());
}
554#endif
555
// Checks that reuse = true allows mapping on top of an existing reservation
// (which would fail with reuse = false, as MapAnonymousExactAddr shows).
TEST_F(MemMapTest, MapAnonymousReuse) {
  CommonInit();
  std::string error_msg;
  MemMap map = MemMap::MapAnonymous("MapAnonymousReserve",
                                    nullptr,
                                    0x20000,
                                    PROT_READ | PROT_WRITE,
                                    /* low_4gb */ false,
                                    /* reuse */ false,
                                    &error_msg);
  ASSERT_TRUE(map.IsValid());
  ASSERT_TRUE(error_msg.empty());
  // Map inside the reservation with reuse = true; must succeed even though
  // the range is already mapped.
  MemMap map2 = MemMap::MapAnonymous("MapAnonymousReused",
                                     reinterpret_cast<uint8_t*>(map.BaseBegin()),
                                     0x10000,
                                     PROT_READ | PROT_WRITE,
                                     /* low_4gb */ false,
                                     /* reuse */ true,
                                     &error_msg);
  ASSERT_TRUE(map2.IsValid());
  ASSERT_TRUE(error_msg.empty());
}
578
// Checks MemMap::CheckNoGaps over page-sized mappings carved back-to-back
// out of a previously-probed 3-page region: adjacent maps report no gap,
// and unmapping the middle one introduces a detectable gap.
TEST_F(MemMapTest, CheckNoGaps) {
  CommonInit();
  std::string error_msg;
  constexpr size_t kNumPages = 3;
  // Map a 3-page mem map.
  MemMap map = MemMap::MapAnonymous("MapAnonymous0",
                                    /* addr */ nullptr,
                                    kPageSize * kNumPages,
                                    PROT_READ | PROT_WRITE,
                                    /* low_4gb */ false,
                                    /* reuse */ false,
                                    &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  // Record the base address.
  uint8_t* map_base = reinterpret_cast<uint8_t*>(map.BaseBegin());
  // Unmap it.
  map.Reset();

  // Map at the same address, but in page-sized separate mem maps,
  // assuming the space at the address is still available.
  MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
                                     map_base,
                                     kPageSize,
                                     PROT_READ | PROT_WRITE,
                                     /* low_4gb */ false,
                                     /* reuse */ false,
                                     &error_msg);
  ASSERT_TRUE(map0.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
                                     map_base + kPageSize,
                                     kPageSize,
                                     PROT_READ | PROT_WRITE,
                                     /* low_4gb */ false,
                                     /* reuse */ false,
                                     &error_msg);
  ASSERT_TRUE(map1.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
                                     map_base + kPageSize * 2,
                                     kPageSize,
                                     PROT_READ | PROT_WRITE,
                                     /* low_4gb */ false,
                                     /* reuse */ false,
                                     &error_msg);
  ASSERT_TRUE(map2.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());

  // One-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0, map0));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1, map1));
  ASSERT_TRUE(MemMap::CheckNoGaps(map2, map2));

  // Two or three-map cases.
  ASSERT_TRUE(MemMap::CheckNoGaps(map0, map1));
  ASSERT_TRUE(MemMap::CheckNoGaps(map1, map2));
  ASSERT_TRUE(MemMap::CheckNoGaps(map0, map2));

  // Unmap the middle one.
  map1.Reset();

  // Should return false now that there's a gap in the middle.
  ASSERT_FALSE(MemMap::CheckNoGaps(map0, map2));
}
644
// Checks MemMap::AlignBy: splits a 14-page region into pieces of 3/4/3/4
// pages, aligns each piece to a 2-page boundary, and verifies the aligned
// begin/end of every piece. The expectations branch on whether the original
// base happened to be 2-page aligned, since that flips which pieces shrink.
TEST_F(MemMapTest, AlignBy) {
  CommonInit();
  std::string error_msg;
  // Cast the page size to size_t.
  const size_t page_size = static_cast<size_t>(kPageSize);
  // Map a region.
  MemMap m0 = MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
                                   /* addr */ nullptr,
                                   14 * page_size,
                                   PROT_READ | PROT_WRITE,
                                   /* low_4gb */ false,
                                   /* reuse */ false,
                                   &error_msg);
  ASSERT_TRUE(m0.IsValid());
  uint8_t* base0 = m0.Begin();
  ASSERT_TRUE(base0 != nullptr) << error_msg;
  ASSERT_EQ(m0.Size(), 14 * page_size);
  ASSERT_EQ(m0.BaseBegin(), base0);
  ASSERT_EQ(m0.BaseSize(), m0.Size());

  // Break it into several regions by using RemapAtEnd.
  MemMap m1 = m0.RemapAtEnd(base0 + 3 * page_size,
                            "MemMapTest_AlignByTest_map1",
                            PROT_READ | PROT_WRITE,
                            &error_msg);
  uint8_t* base1 = m1.Begin();
  ASSERT_TRUE(base1 != nullptr) << error_msg;
  ASSERT_EQ(base1, base0 + 3 * page_size);
  ASSERT_EQ(m0.Size(), 3 * page_size);

  MemMap m2 = m1.RemapAtEnd(base1 + 4 * page_size,
                            "MemMapTest_AlignByTest_map2",
                            PROT_READ | PROT_WRITE,
                            &error_msg);
  uint8_t* base2 = m2.Begin();
  ASSERT_TRUE(base2 != nullptr) << error_msg;
  ASSERT_EQ(base2, base1 + 4 * page_size);
  ASSERT_EQ(m1.Size(), 4 * page_size);

  MemMap m3 = m2.RemapAtEnd(base2 + 3 * page_size,
                            "MemMapTest_AlignByTest_map1",
                            PROT_READ | PROT_WRITE,
                            &error_msg);
  uint8_t* base3 = m3.Begin();
  ASSERT_TRUE(base3 != nullptr) << error_msg;
  ASSERT_EQ(base3, base2 + 3 * page_size);
  ASSERT_EQ(m2.Size(), 3 * page_size);
  ASSERT_EQ(m3.Size(), 4 * page_size);

  // Record the original end of each piece before aligning.
  uint8_t* end0 = base0 + m0.Size();
  uint8_t* end1 = base1 + m1.Size();
  uint8_t* end2 = base2 + m2.Size();
  uint8_t* end3 = base3 + m3.Size();

  ASSERT_EQ(static_cast<size_t>(end3 - base0), 14 * page_size);

  // Since the pieces are odd multiples of a page, alignment of the piece
  // boundaries alternates; which ones start aligned depends on base0.
  if (IsAlignedParam(base0, 2 * page_size)) {
    ASSERT_FALSE(IsAlignedParam(base1, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(base2, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(base3, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(end3, 2 * page_size));
  } else {
    ASSERT_TRUE(IsAlignedParam(base1, 2 * page_size));
    ASSERT_TRUE(IsAlignedParam(base2, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(base3, 2 * page_size));
    ASSERT_FALSE(IsAlignedParam(end3, 2 * page_size));
  }

  // Align by 2 * page_size;
  m0.AlignBy(2 * page_size);
  m1.AlignBy(2 * page_size);
  m2.AlignBy(2 * page_size);
  m3.AlignBy(2 * page_size);

  // After AlignBy, every piece's begin and end must be 2-page aligned.
  EXPECT_TRUE(IsAlignedParam(m0.Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m1.Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m2.Begin(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m3.Begin(), 2 * page_size));

  EXPECT_TRUE(IsAlignedParam(m0.Begin() + m0.Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m1.Begin() + m1.Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m2.Begin() + m2.Size(), 2 * page_size));
  EXPECT_TRUE(IsAlignedParam(m3.Begin() + m3.Size(), 2 * page_size));

  // Which edges moved inward by one page depends on the original alignment.
  if (IsAlignedParam(base0, 2 * page_size)) {
    EXPECT_EQ(m0.Begin(), base0);
    EXPECT_EQ(m0.Begin() + m0.Size(), end0 - page_size);
    EXPECT_EQ(m1.Begin(), base1 + page_size);
    EXPECT_EQ(m1.Begin() + m1.Size(), end1 - page_size);
    EXPECT_EQ(m2.Begin(), base2 + page_size);
    EXPECT_EQ(m2.Begin() + m2.Size(), end2);
    EXPECT_EQ(m3.Begin(), base3);
    EXPECT_EQ(m3.Begin() + m3.Size(), end3);
  } else {
    EXPECT_EQ(m0.Begin(), base0 + page_size);
    EXPECT_EQ(m0.Begin() + m0.Size(), end0);
    EXPECT_EQ(m1.Begin(), base1);
    EXPECT_EQ(m1.Begin() + m1.Size(), end1);
    EXPECT_EQ(m2.Begin(), base2);
    EXPECT_EQ(m2.Begin() + m2.Size(), end2 - page_size);
    EXPECT_EQ(m3.Begin(), base3 + page_size);
    EXPECT_EQ(m3.Begin() + m3.Size(), end3 - page_size);
  }
}
749
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700750} // namespace art