blob: 5815cf99e7cb4b20f37e4c44fe1c6c1908c8ff47 [file] [log] [blame]
Brian Carlstrom9004cb62013-07-26 15:48:31 -07001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "mem_map.h"
18
Andreas Gamped4901292017-05-30 18:41:34 -070019#include <sys/mman.h>
20
Ian Rogers700a4022014-05-19 16:49:03 -070021#include <memory>
Alex Lightca97ada2018-02-02 09:25:31 -080022#include <random>
Ian Rogers700a4022014-05-19 16:49:03 -070023
David Sehrd5f8de82018-04-27 14:12:03 -070024#include "base/common_art_test.h"
25#include "common_runtime_test.h" // For TEST_DISABLED_FOR_MIPS
David Sehr1979c642018-04-26 14:41:18 -070026#include "memory_tool.h"
27#include "unix_file/fd_file.h"
Brian Carlstrom9004cb62013-07-26 15:48:31 -070028
29namespace art {
30
David Sehrd5f8de82018-04-27 14:12:03 -070031class MemMapTest : public CommonArtTest {
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070032 public:
Alex Lightca97ada2018-02-02 09:25:31 -080033 static bool IsAddressMapped(void* addr) {
34 bool res = msync(addr, 1, MS_SYNC) == 0;
35 if (!res && errno != ENOMEM) {
36 PLOG(FATAL) << "Unexpected error occurred on msync";
37 }
38 return res;
39 }
40
41 static std::vector<uint8_t> RandomData(size_t size) {
42 std::random_device rd;
43 std::uniform_int_distribution<uint8_t> dist;
44 std::vector<uint8_t> res;
45 res.resize(size);
46 for (size_t i = 0; i < size; i++) {
47 res[i] = dist(rd);
48 }
49 return res;
50 }
51
Mathieu Chartier16d29f82015-11-10 10:32:52 -080052 static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
53 // Find a valid map address and unmap it before returning.
54 std::string error_msg;
Vladimir Markoc34bebf2018-08-16 16:12:49 +010055 MemMap map = MemMap::MapAnonymous("temp",
Andreas Gampe0de385f2018-10-11 11:11:13 -070056 /* addr= */ nullptr,
Vladimir Markoc34bebf2018-08-16 16:12:49 +010057 size,
58 PROT_READ,
59 low_4gb,
Vladimir Markoc34bebf2018-08-16 16:12:49 +010060 &error_msg);
61 CHECK(map.IsValid());
62 return map.Begin();
Mathieu Chartier16d29f82015-11-10 10:32:52 -080063 }
64
Ian Rogersef7d42f2014-01-06 12:55:46 -080065 static void RemapAtEndTest(bool low_4gb) {
66 std::string error_msg;
67 // Cast the page size to size_t.
68 const size_t page_size = static_cast<size_t>(kPageSize);
69 // Map a two-page memory region.
Vladimir Markoc34bebf2018-08-16 16:12:49 +010070 MemMap m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
Andreas Gampe0de385f2018-10-11 11:11:13 -070071 /* addr= */ nullptr,
Vladimir Markoc34bebf2018-08-16 16:12:49 +010072 2 * page_size,
73 PROT_READ | PROT_WRITE,
74 low_4gb,
Vladimir Markoc34bebf2018-08-16 16:12:49 +010075 &error_msg);
Ian Rogersef7d42f2014-01-06 12:55:46 -080076 // Check its state and write to it.
Vladimir Markoc34bebf2018-08-16 16:12:49 +010077 ASSERT_TRUE(m0.IsValid());
78 uint8_t* base0 = m0.Begin();
Ian Rogersef7d42f2014-01-06 12:55:46 -080079 ASSERT_TRUE(base0 != nullptr) << error_msg;
Vladimir Markoc34bebf2018-08-16 16:12:49 +010080 size_t size0 = m0.Size();
81 EXPECT_EQ(m0.Size(), 2 * page_size);
82 EXPECT_EQ(m0.BaseBegin(), base0);
83 EXPECT_EQ(m0.BaseSize(), size0);
Ian Rogersef7d42f2014-01-06 12:55:46 -080084 memset(base0, 42, 2 * page_size);
85 // Remap the latter half into a second MemMap.
Vladimir Markoc34bebf2018-08-16 16:12:49 +010086 MemMap m1 = m0.RemapAtEnd(base0 + page_size,
87 "MemMapTest_RemapAtEndTest_map1",
88 PROT_READ | PROT_WRITE,
89 &error_msg);
Ian Rogersef7d42f2014-01-06 12:55:46 -080090 // Check the states of the two maps.
Vladimir Markoc34bebf2018-08-16 16:12:49 +010091 EXPECT_EQ(m0.Begin(), base0) << error_msg;
92 EXPECT_EQ(m0.Size(), page_size);
93 EXPECT_EQ(m0.BaseBegin(), base0);
94 EXPECT_EQ(m0.BaseSize(), page_size);
95 uint8_t* base1 = m1.Begin();
96 size_t size1 = m1.Size();
Ian Rogersef7d42f2014-01-06 12:55:46 -080097 EXPECT_EQ(base1, base0 + page_size);
98 EXPECT_EQ(size1, page_size);
Vladimir Markoc34bebf2018-08-16 16:12:49 +010099 EXPECT_EQ(m1.BaseBegin(), base1);
100 EXPECT_EQ(m1.BaseSize(), size1);
Ian Rogersef7d42f2014-01-06 12:55:46 -0800101 // Write to the second region.
102 memset(base1, 43, page_size);
103 // Check the contents of the two regions.
104 for (size_t i = 0; i < page_size; ++i) {
105 EXPECT_EQ(base0[i], 42);
106 }
107 for (size_t i = 0; i < page_size; ++i) {
108 EXPECT_EQ(base1[i], 43);
109 }
110 // Unmap the first region.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100111 m0.Reset();
Ian Rogersef7d42f2014-01-06 12:55:46 -0800112 // Make sure the second region is still accessible after the first
113 // region is unmapped.
114 for (size_t i = 0; i < page_size; ++i) {
115 EXPECT_EQ(base1[i], 43);
116 }
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100117 MemMap m2 = m1.RemapAtEnd(m1.Begin(),
118 "MemMapTest_RemapAtEndTest_map1",
119 PROT_READ | PROT_WRITE,
120 &error_msg);
121 ASSERT_TRUE(m2.IsValid()) << error_msg;
122 ASSERT_FALSE(m1.IsValid());
Ian Rogersef7d42f2014-01-06 12:55:46 -0800123 }
Andreas Gamped8f26db2014-05-19 17:01:13 -0700124
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700125 void CommonInit() {
126 MemMap::Init();
127 }
128
Andreas Gamped8f26db2014-05-19 17:01:13 -0700129#if defined(__LP64__) && !defined(__x86_64__)
130 static uintptr_t GetLinearScanPos() {
131 return MemMap::next_mem_pos_;
132 }
133#endif
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700134};
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700135
#if defined(__LP64__) && !defined(__x86_64__)

#ifdef __BIONIC__
// Defined in mem_map.cc on Bionic; derives a linear-scan start position from
// the given input.
extern uintptr_t CreateStartPos(uint64_t input);
#endif

TEST_F(MemMapTest, Start) {
  CommonInit();
  uintptr_t start = GetLinearScanPos();
  // The scan position must lie between 64KB and ART_BASE_ADDRESS.
  EXPECT_LE(64 * KB, start);
  EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));
#ifdef __BIONIC__
  // Successive inputs should yield distinct start positions.
  uintptr_t previous = 0;
  for (size_t i = 0; i < 100; ++i) {
    const uintptr_t candidate = CreateStartPos(i * kPageSize);
    EXPECT_NE(previous, candidate);
    previous = candidate;
  }

  // Even the maximal input must stay below ART_BASE_ADDRESS.
  EXPECT_LT(CreateStartPos(~0), static_cast<uintptr_t>(ART_BASE_ADDRESS));
#endif
}
#endif
162
Alex Lightca97ada2018-02-02 09:25:31 -0800163// We need mremap to be able to test ReplaceMapping at all
164#if HAVE_MREMAP_SYSCALL
165TEST_F(MemMapTest, ReplaceMapping_SameSize) {
166 std::string error_msg;
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100167 MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
Andreas Gampe0de385f2018-10-11 11:11:13 -0700168 /* addr= */ nullptr,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100169 kPageSize,
170 PROT_READ,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700171 /* low_4gb= */ false,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100172 &error_msg);
173 ASSERT_TRUE(dest.IsValid());
174 MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
Andreas Gampe0de385f2018-10-11 11:11:13 -0700175 /* addr= */ nullptr,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100176 kPageSize,
177 PROT_WRITE | PROT_READ,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700178 /* low_4gb= */ false,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100179 &error_msg);
180 ASSERT_TRUE(source.IsValid());
181 void* source_addr = source.Begin();
182 void* dest_addr = dest.Begin();
Alex Lightca97ada2018-02-02 09:25:31 -0800183 ASSERT_TRUE(IsAddressMapped(source_addr));
184 ASSERT_TRUE(IsAddressMapped(dest_addr));
185
186 std::vector<uint8_t> data = RandomData(kPageSize);
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100187 memcpy(source.Begin(), data.data(), data.size());
Alex Lightca97ada2018-02-02 09:25:31 -0800188
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100189 ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
Alex Lightca97ada2018-02-02 09:25:31 -0800190
191 ASSERT_FALSE(IsAddressMapped(source_addr));
192 ASSERT_TRUE(IsAddressMapped(dest_addr));
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100193 ASSERT_FALSE(source.IsValid());
Alex Lightca97ada2018-02-02 09:25:31 -0800194
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100195 ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
Alex Lightca97ada2018-02-02 09:25:31 -0800196
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100197 ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
Alex Lightca97ada2018-02-02 09:25:31 -0800198}
199
200TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
201 std::string error_msg;
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100202 MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
Andreas Gampe0de385f2018-10-11 11:11:13 -0700203 /* addr= */ nullptr,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100204 5 * kPageSize, // Need to make it larger
205 // initially so we know
206 // there won't be mappings
207 // in the way we we move
208 // source.
209 PROT_READ,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700210 /* low_4gb= */ false,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100211 &error_msg);
212 ASSERT_TRUE(dest.IsValid());
213 MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
Andreas Gampe0de385f2018-10-11 11:11:13 -0700214 /* addr= */ nullptr,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100215 3 * kPageSize,
216 PROT_WRITE | PROT_READ,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700217 /* low_4gb= */ false,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100218 &error_msg);
219 ASSERT_TRUE(source.IsValid());
220 uint8_t* source_addr = source.Begin();
221 uint8_t* dest_addr = dest.Begin();
Alex Lightca97ada2018-02-02 09:25:31 -0800222 ASSERT_TRUE(IsAddressMapped(source_addr));
223
224 // Fill the source with random data.
225 std::vector<uint8_t> data = RandomData(3 * kPageSize);
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100226 memcpy(source.Begin(), data.data(), data.size());
Alex Lightca97ada2018-02-02 09:25:31 -0800227
228 // Make the dest smaller so that we know we'll have space.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100229 dest.SetSize(kPageSize);
Alex Lightca97ada2018-02-02 09:25:31 -0800230
231 ASSERT_TRUE(IsAddressMapped(dest_addr));
232 ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100233 ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
Alex Lightca97ada2018-02-02 09:25:31 -0800234
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100235 ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
Alex Lightca97ada2018-02-02 09:25:31 -0800236
237 ASSERT_FALSE(IsAddressMapped(source_addr));
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100238 ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * kPageSize));
Alex Lightca97ada2018-02-02 09:25:31 -0800239 ASSERT_TRUE(IsAddressMapped(dest_addr));
240 ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100241 ASSERT_FALSE(source.IsValid());
Alex Lightca97ada2018-02-02 09:25:31 -0800242
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100243 ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
Alex Lightca97ada2018-02-02 09:25:31 -0800244}
245
246TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
247 std::string error_msg;
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100248 MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
Andreas Gampe0de385f2018-10-11 11:11:13 -0700249 /* addr= */ nullptr,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100250 3 * kPageSize,
251 PROT_READ,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700252 /* low_4gb= */ false,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100253 &error_msg);
254 ASSERT_TRUE(dest.IsValid());
255 MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
Andreas Gampe0de385f2018-10-11 11:11:13 -0700256 /* addr= */ nullptr,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100257 kPageSize,
258 PROT_WRITE | PROT_READ,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700259 /* low_4gb= */ false,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100260 &error_msg);
261 ASSERT_TRUE(source.IsValid());
262 uint8_t* source_addr = source.Begin();
263 uint8_t* dest_addr = dest.Begin();
Alex Lightca97ada2018-02-02 09:25:31 -0800264 ASSERT_TRUE(IsAddressMapped(source_addr));
265 ASSERT_TRUE(IsAddressMapped(dest_addr));
266 ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100267 ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * kPageSize));
Alex Lightca97ada2018-02-02 09:25:31 -0800268
269 std::vector<uint8_t> data = RandomData(kPageSize);
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100270 memcpy(source.Begin(), data.data(), kPageSize);
Alex Lightca97ada2018-02-02 09:25:31 -0800271
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100272 ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
Alex Lightca97ada2018-02-02 09:25:31 -0800273
274 ASSERT_FALSE(IsAddressMapped(source_addr));
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100275 ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
Alex Lightca97ada2018-02-02 09:25:31 -0800276 ASSERT_TRUE(IsAddressMapped(dest_addr));
277 ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100278 ASSERT_FALSE(source.IsValid());
Alex Lightca97ada2018-02-02 09:25:31 -0800279
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100280 ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
Alex Lightca97ada2018-02-02 09:25:31 -0800281}
282
283TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
284 std::string error_msg;
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100285 MemMap dest =
Alex Lightca97ada2018-02-02 09:25:31 -0800286 MemMap::MapAnonymous(
287 "MapAnonymousEmpty-atomic-replace-dest",
Andreas Gampe0de385f2018-10-11 11:11:13 -0700288 /* addr= */ nullptr,
Alex Lightca97ada2018-02-02 09:25:31 -0800289 3 * kPageSize, // Need to make it larger initially so we know there won't be mappings in
290 // the way we we move source.
291 PROT_READ | PROT_WRITE,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700292 /* low_4gb= */ false,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100293 &error_msg);
294 ASSERT_TRUE(dest.IsValid());
Alex Lightca97ada2018-02-02 09:25:31 -0800295 // Resize down to 1 page so we can remap the rest.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100296 dest.SetSize(kPageSize);
Alex Lightca97ada2018-02-02 09:25:31 -0800297 // Create source from the last 2 pages
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100298 MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
299 dest.Begin() + kPageSize,
300 2 * kPageSize,
301 PROT_WRITE | PROT_READ,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700302 /* low_4gb= */ false,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100303 &error_msg);
304 ASSERT_TRUE(source.IsValid());
305 ASSERT_EQ(dest.Begin() + kPageSize, source.Begin());
306 uint8_t* source_addr = source.Begin();
307 uint8_t* dest_addr = dest.Begin();
Alex Lightca97ada2018-02-02 09:25:31 -0800308 ASSERT_TRUE(IsAddressMapped(source_addr));
309
310 // Fill the source and dest with random data.
311 std::vector<uint8_t> data = RandomData(2 * kPageSize);
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100312 memcpy(source.Begin(), data.data(), data.size());
Alex Lightca97ada2018-02-02 09:25:31 -0800313 std::vector<uint8_t> dest_data = RandomData(kPageSize);
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100314 memcpy(dest.Begin(), dest_data.data(), dest_data.size());
Alex Lightca97ada2018-02-02 09:25:31 -0800315
316 ASSERT_TRUE(IsAddressMapped(dest_addr));
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100317 ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
Alex Lightca97ada2018-02-02 09:25:31 -0800318
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100319 ASSERT_FALSE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
Alex Lightca97ada2018-02-02 09:25:31 -0800320
Alex Lightca97ada2018-02-02 09:25:31 -0800321 ASSERT_TRUE(IsAddressMapped(source_addr));
322 ASSERT_TRUE(IsAddressMapped(dest_addr));
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100323 ASSERT_EQ(source.Size(), data.size());
324 ASSERT_EQ(dest.Size(), dest_data.size());
Alex Lightca97ada2018-02-02 09:25:31 -0800325
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100326 ASSERT_EQ(memcmp(source.Begin(), data.data(), data.size()), 0);
327 ASSERT_EQ(memcmp(dest.Begin(), dest_data.data(), dest_data.size()), 0);
Alex Lightca97ada2018-02-02 09:25:31 -0800328}
329#endif // HAVE_MREMAP_SYSCALL
330
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700331TEST_F(MemMapTest, MapAnonymousEmpty) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700332 CommonInit();
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700333 std::string error_msg;
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100334 MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
Andreas Gampe0de385f2018-10-11 11:11:13 -0700335 /* addr= */ nullptr,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100336 0,
337 PROT_READ,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700338 /* low_4gb= */ false,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100339 &error_msg);
340 ASSERT_FALSE(map.IsValid()) << error_msg;
341 ASSERT_FALSE(error_msg.empty());
342
343 error_msg.clear();
344 map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
Andreas Gampe0de385f2018-10-11 11:11:13 -0700345 /* addr= */ nullptr,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100346 kPageSize,
347 PROT_READ | PROT_WRITE,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700348 /* low_4gb= */ false,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100349 &error_msg);
350 ASSERT_TRUE(map.IsValid()) << error_msg;
Ian Rogers8d31bbd2013-10-13 10:44:14 -0700351 ASSERT_TRUE(error_msg.empty());
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700352}
353
Mathieu Chartier486932a2016-02-24 10:09:23 -0800354TEST_F(MemMapTest, MapAnonymousFailNullError) {
355 CommonInit();
356 // Test that we don't crash with a null error_str when mapping at an invalid location.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100357 MemMap map = MemMap::MapAnonymous("MapAnonymousInvalid",
358 reinterpret_cast<uint8_t*>(kPageSize),
359 0x20000,
360 PROT_READ | PROT_WRITE,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700361 /* low_4gb= */ false,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100362 nullptr);
363 ASSERT_FALSE(map.IsValid());
Mathieu Chartier486932a2016-02-24 10:09:23 -0800364}
365
Ian Rogersef7d42f2014-01-06 12:55:46 -0800366#ifdef __LP64__
367TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700368 CommonInit();
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700369 std::string error_msg;
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100370 MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
Andreas Gampe0de385f2018-10-11 11:11:13 -0700371 /* addr= */ nullptr,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100372 0,
373 PROT_READ,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700374 /* low_4gb= */ true,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100375 &error_msg);
376 ASSERT_FALSE(map.IsValid()) << error_msg;
377 ASSERT_FALSE(error_msg.empty());
378
379 error_msg.clear();
380 map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
Andreas Gampe0de385f2018-10-11 11:11:13 -0700381 /* addr= */ nullptr,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100382 kPageSize,
383 PROT_READ | PROT_WRITE,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700384 /* low_4gb= */ true,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100385 &error_msg);
386 ASSERT_TRUE(map.IsValid()) << error_msg;
Ian Rogersef7d42f2014-01-06 12:55:46 -0800387 ASSERT_TRUE(error_msg.empty());
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100388 ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700389}
Mathieu Chartier42bddce2015-11-09 15:16:56 -0800390TEST_F(MemMapTest, MapFile32Bit) {
391 CommonInit();
392 std::string error_msg;
393 ScratchFile scratch_file;
394 constexpr size_t kMapSize = kPageSize;
395 std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
396 ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
Andreas Gampe0de385f2018-10-11 11:11:13 -0700397 MemMap map = MemMap::MapFile(/*byte_count=*/kMapSize,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100398 PROT_READ,
399 MAP_PRIVATE,
400 scratch_file.GetFd(),
Andreas Gampe0de385f2018-10-11 11:11:13 -0700401 /*start=*/0,
402 /*low_4gb=*/true,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100403 scratch_file.GetFilename().c_str(),
404 &error_msg);
405 ASSERT_TRUE(map.IsValid()) << error_msg;
Mathieu Chartier42bddce2015-11-09 15:16:56 -0800406 ASSERT_TRUE(error_msg.empty());
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100407 ASSERT_EQ(map.Size(), kMapSize);
408 ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
Mathieu Chartier42bddce2015-11-09 15:16:56 -0800409}
Ian Rogersef7d42f2014-01-06 12:55:46 -0800410#endif
411
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700412TEST_F(MemMapTest, MapAnonymousExactAddr) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700413 CommonInit();
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700414 std::string error_msg;
Mathieu Chartier16d29f82015-11-10 10:32:52 -0800415 // Find a valid address.
Andreas Gampe0de385f2018-10-11 11:11:13 -0700416 uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb=*/false);
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700417 // Map at an address that should work, which should succeed.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100418 MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
419 valid_address,
420 kPageSize,
421 PROT_READ | PROT_WRITE,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700422 /* low_4gb= */ false,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100423 &error_msg);
424 ASSERT_TRUE(map0.IsValid()) << error_msg;
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700425 ASSERT_TRUE(error_msg.empty());
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100426 ASSERT_TRUE(map0.BaseBegin() == valid_address);
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700427 // Map at an unspecified address, which should succeed.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100428 MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
Andreas Gampe0de385f2018-10-11 11:11:13 -0700429 /* addr= */ nullptr,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100430 kPageSize,
431 PROT_READ | PROT_WRITE,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700432 /* low_4gb= */ false,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100433 &error_msg);
434 ASSERT_TRUE(map1.IsValid()) << error_msg;
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700435 ASSERT_TRUE(error_msg.empty());
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100436 ASSERT_TRUE(map1.BaseBegin() != nullptr);
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700437 // Attempt to map at the same address, which should fail.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100438 MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
439 reinterpret_cast<uint8_t*>(map1.BaseBegin()),
440 kPageSize,
441 PROT_READ | PROT_WRITE,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700442 /* low_4gb= */ false,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100443 &error_msg);
444 ASSERT_FALSE(map2.IsValid()) << error_msg;
Hiroshi Yamauchi4fb5df82014-03-13 15:10:27 -0700445 ASSERT_TRUE(!error_msg.empty());
446}
447
Ian Rogersef7d42f2014-01-06 12:55:46 -0800448TEST_F(MemMapTest, RemapAtEnd) {
449 RemapAtEndTest(false);
450}
451
452#ifdef __LP64__
453TEST_F(MemMapTest, RemapAtEnd32bit) {
454 RemapAtEndTest(true);
455}
456#endif
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700457
Orion Hodson1d3fd082018-09-28 09:38:35 +0100458TEST_F(MemMapTest, RemapFileViewAtEnd) {
459 CommonInit();
460 std::string error_msg;
461 ScratchFile scratch_file;
462
463 // Create a scratch file 3 pages large.
464 constexpr size_t kMapSize = 3 * kPageSize;
465 std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
466 memset(data.get(), 1, kPageSize);
467 memset(&data[0], 0x55, kPageSize);
468 memset(&data[kPageSize], 0x5a, kPageSize);
469 memset(&data[2 * kPageSize], 0xaa, kPageSize);
470 ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
471
Andreas Gampe0de385f2018-10-11 11:11:13 -0700472 MemMap map = MemMap::MapFile(/*byte_count=*/kMapSize,
Orion Hodson1d3fd082018-09-28 09:38:35 +0100473 PROT_READ,
474 MAP_PRIVATE,
475 scratch_file.GetFd(),
Andreas Gampe0de385f2018-10-11 11:11:13 -0700476 /*start=*/0,
477 /*low_4gb=*/true,
Orion Hodson1d3fd082018-09-28 09:38:35 +0100478 scratch_file.GetFilename().c_str(),
479 &error_msg);
480 ASSERT_TRUE(map.IsValid()) << error_msg;
481 ASSERT_TRUE(error_msg.empty());
482 ASSERT_EQ(map.Size(), kMapSize);
483 ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
484 ASSERT_EQ(data[0], *map.Begin());
485 ASSERT_EQ(data[kPageSize], *(map.Begin() + kPageSize));
486 ASSERT_EQ(data[2 * kPageSize], *(map.Begin() + 2 * kPageSize));
487
488 for (size_t offset = 2 * kPageSize; offset > 0; offset -= kPageSize) {
489 MemMap tail = map.RemapAtEnd(map.Begin() + offset,
490 "bad_offset_map",
491 PROT_READ,
492 MAP_PRIVATE | MAP_FIXED,
493 scratch_file.GetFd(),
494 offset,
495 &error_msg);
496 ASSERT_TRUE(tail.IsValid()) << error_msg;
497 ASSERT_TRUE(error_msg.empty());
498 ASSERT_EQ(offset, map.Size());
499 ASSERT_EQ(static_cast<size_t>(kPageSize), tail.Size());
500 ASSERT_EQ(tail.Begin(), map.Begin() + map.Size());
501 ASSERT_EQ(data[offset], *tail.Begin());
502 }
503}
504
Qiming Shi84d49cc2014-04-24 15:38:41 +0800505TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
Roland Levillain14306b82016-01-20 12:13:57 +0000506 // Some MIPS32 hardware (namely the Creator Ci20 development board)
507 // cannot allocate in the 2GB-4GB region.
508 TEST_DISABLED_FOR_MIPS();
509
Roland Levillain0b0d3b42018-06-14 13:55:49 +0100510 // This test does not work under AddressSanitizer.
511 // Historical note: This test did not work under Valgrind either.
Roland Levillain05e34f42018-05-24 13:19:05 +0000512 TEST_DISABLED_FOR_MEMORY_TOOL();
513
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700514 CommonInit();
Roland Levillain05e34f42018-05-24 13:19:05 +0000515 constexpr size_t size = 0x100000;
516 // Try all addresses starting from 2GB to 4GB.
517 size_t start_addr = 2 * GB;
518 std::string error_msg;
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100519 MemMap map;
Roland Levillain05e34f42018-05-24 13:19:05 +0000520 for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100521 map = MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
522 reinterpret_cast<uint8_t*>(start_addr),
523 size,
524 PROT_READ | PROT_WRITE,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700525 /*low_4gb=*/ true,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100526 &error_msg);
527 if (map.IsValid()) {
Roland Levillain05e34f42018-05-24 13:19:05 +0000528 break;
Mathieu Chartier16d29f82015-11-10 10:32:52 -0800529 }
Andreas Gampe928f72b2014-09-09 19:53:48 -0700530 }
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100531 ASSERT_TRUE(map.IsValid()) << error_msg;
532 ASSERT_GE(reinterpret_cast<uintptr_t>(map.End()), 2u * GB);
Roland Levillain05e34f42018-05-24 13:19:05 +0000533 ASSERT_TRUE(error_msg.empty());
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100534 ASSERT_EQ(map.BaseBegin(), reinterpret_cast<void*>(start_addr));
Qiming Shi84d49cc2014-04-24 15:38:41 +0800535}
536
537TEST_F(MemMapTest, MapAnonymousOverflow) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700538 CommonInit();
Qiming Shi84d49cc2014-04-24 15:38:41 +0800539 std::string error_msg;
540 uintptr_t ptr = 0;
541 ptr -= kPageSize; // Now it's close to the top.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100542 MemMap map = MemMap::MapAnonymous("MapAnonymousOverflow",
543 reinterpret_cast<uint8_t*>(ptr),
544 2 * kPageSize, // brings it over the top.
545 PROT_READ | PROT_WRITE,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700546 /* low_4gb= */ false,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100547 &error_msg);
548 ASSERT_FALSE(map.IsValid());
Qiming Shi84d49cc2014-04-24 15:38:41 +0800549 ASSERT_FALSE(error_msg.empty());
550}
551
552#ifdef __LP64__
553TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700554 CommonInit();
Qiming Shi84d49cc2014-04-24 15:38:41 +0800555 std::string error_msg;
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100556 MemMap map =
Vladimir Marko5c42c292015-02-25 12:02:49 +0000557 MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
558 reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
559 kPageSize,
560 PROT_READ | PROT_WRITE,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700561 /* low_4gb= */ true,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100562 &error_msg);
563 ASSERT_FALSE(map.IsValid());
Qiming Shi84d49cc2014-04-24 15:38:41 +0800564 ASSERT_FALSE(error_msg.empty());
565}
566
567TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700568 CommonInit();
Qiming Shi84d49cc2014-04-24 15:38:41 +0800569 std::string error_msg;
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100570 MemMap map = MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
571 reinterpret_cast<uint8_t*>(0xF0000000),
572 0x20000000,
573 PROT_READ | PROT_WRITE,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700574 /* low_4gb= */ true,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100575 &error_msg);
576 ASSERT_FALSE(map.IsValid());
Qiming Shi84d49cc2014-04-24 15:38:41 +0800577 ASSERT_FALSE(error_msg.empty());
578}
579#endif
580
Vladimir Marko5c42c292015-02-25 12:02:49 +0000581TEST_F(MemMapTest, MapAnonymousReuse) {
582 CommonInit();
583 std::string error_msg;
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100584 MemMap map = MemMap::MapAnonymous("MapAnonymousReserve",
585 nullptr,
586 0x20000,
587 PROT_READ | PROT_WRITE,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700588 /* low_4gb= */ false,
589 /* reuse= */ false,
590 /* reservation= */ nullptr,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100591 &error_msg);
592 ASSERT_TRUE(map.IsValid());
Vladimir Marko5c42c292015-02-25 12:02:49 +0000593 ASSERT_TRUE(error_msg.empty());
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100594 MemMap map2 = MemMap::MapAnonymous("MapAnonymousReused",
595 reinterpret_cast<uint8_t*>(map.BaseBegin()),
596 0x10000,
597 PROT_READ | PROT_WRITE,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700598 /* low_4gb= */ false,
599 /* reuse= */ true,
600 /* reservation= */ nullptr,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100601 &error_msg);
602 ASSERT_TRUE(map2.IsValid());
Vladimir Marko5c42c292015-02-25 12:02:49 +0000603 ASSERT_TRUE(error_msg.empty());
604}
605
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700606TEST_F(MemMapTest, CheckNoGaps) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700607 CommonInit();
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700608 std::string error_msg;
609 constexpr size_t kNumPages = 3;
610 // Map a 3-page mem map.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100611 MemMap map = MemMap::MapAnonymous("MapAnonymous0",
Andreas Gampe0de385f2018-10-11 11:11:13 -0700612 /* addr= */ nullptr,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100613 kPageSize * kNumPages,
614 PROT_READ | PROT_WRITE,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700615 /* low_4gb= */ false,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100616 &error_msg);
617 ASSERT_TRUE(map.IsValid()) << error_msg;
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700618 ASSERT_TRUE(error_msg.empty());
619 // Record the base address.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100620 uint8_t* map_base = reinterpret_cast<uint8_t*>(map.BaseBegin());
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700621 // Unmap it.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100622 map.Reset();
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700623
624 // Map at the same address, but in page-sized separate mem maps,
625 // assuming the space at the address is still available.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100626 MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
627 map_base,
628 kPageSize,
629 PROT_READ | PROT_WRITE,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700630 /* low_4gb= */ false,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100631 &error_msg);
632 ASSERT_TRUE(map0.IsValid()) << error_msg;
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700633 ASSERT_TRUE(error_msg.empty());
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100634 MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
635 map_base + kPageSize,
636 kPageSize,
637 PROT_READ | PROT_WRITE,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700638 /* low_4gb= */ false,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100639 &error_msg);
640 ASSERT_TRUE(map1.IsValid()) << error_msg;
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700641 ASSERT_TRUE(error_msg.empty());
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100642 MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
643 map_base + kPageSize * 2,
644 kPageSize,
645 PROT_READ | PROT_WRITE,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700646 /* low_4gb= */ false,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100647 &error_msg);
648 ASSERT_TRUE(map2.IsValid()) << error_msg;
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700649 ASSERT_TRUE(error_msg.empty());
650
651 // One-map cases.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100652 ASSERT_TRUE(MemMap::CheckNoGaps(map0, map0));
653 ASSERT_TRUE(MemMap::CheckNoGaps(map1, map1));
654 ASSERT_TRUE(MemMap::CheckNoGaps(map2, map2));
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700655
656 // Two or three-map cases.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100657 ASSERT_TRUE(MemMap::CheckNoGaps(map0, map1));
658 ASSERT_TRUE(MemMap::CheckNoGaps(map1, map2));
659 ASSERT_TRUE(MemMap::CheckNoGaps(map0, map2));
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700660
661 // Unmap the middle one.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100662 map1.Reset();
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700663
664 // Should return false now that there's a gap in the middle.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100665 ASSERT_FALSE(MemMap::CheckNoGaps(map0, map2));
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700666}
667
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800668TEST_F(MemMapTest, AlignBy) {
669 CommonInit();
670 std::string error_msg;
671 // Cast the page size to size_t.
672 const size_t page_size = static_cast<size_t>(kPageSize);
673 // Map a region.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100674 MemMap m0 = MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
Andreas Gampe0de385f2018-10-11 11:11:13 -0700675 /* addr= */ nullptr,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100676 14 * page_size,
677 PROT_READ | PROT_WRITE,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700678 /* low_4gb= */ false,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100679 &error_msg);
680 ASSERT_TRUE(m0.IsValid());
681 uint8_t* base0 = m0.Begin();
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800682 ASSERT_TRUE(base0 != nullptr) << error_msg;
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100683 ASSERT_EQ(m0.Size(), 14 * page_size);
684 ASSERT_EQ(m0.BaseBegin(), base0);
685 ASSERT_EQ(m0.BaseSize(), m0.Size());
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800686
687 // Break it into several regions by using RemapAtEnd.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100688 MemMap m1 = m0.RemapAtEnd(base0 + 3 * page_size,
689 "MemMapTest_AlignByTest_map1",
690 PROT_READ | PROT_WRITE,
691 &error_msg);
692 uint8_t* base1 = m1.Begin();
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800693 ASSERT_TRUE(base1 != nullptr) << error_msg;
694 ASSERT_EQ(base1, base0 + 3 * page_size);
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100695 ASSERT_EQ(m0.Size(), 3 * page_size);
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800696
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100697 MemMap m2 = m1.RemapAtEnd(base1 + 4 * page_size,
698 "MemMapTest_AlignByTest_map2",
699 PROT_READ | PROT_WRITE,
700 &error_msg);
701 uint8_t* base2 = m2.Begin();
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800702 ASSERT_TRUE(base2 != nullptr) << error_msg;
703 ASSERT_EQ(base2, base1 + 4 * page_size);
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100704 ASSERT_EQ(m1.Size(), 4 * page_size);
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800705
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100706 MemMap m3 = m2.RemapAtEnd(base2 + 3 * page_size,
707 "MemMapTest_AlignByTest_map1",
708 PROT_READ | PROT_WRITE,
709 &error_msg);
710 uint8_t* base3 = m3.Begin();
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800711 ASSERT_TRUE(base3 != nullptr) << error_msg;
712 ASSERT_EQ(base3, base2 + 3 * page_size);
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100713 ASSERT_EQ(m2.Size(), 3 * page_size);
714 ASSERT_EQ(m3.Size(), 4 * page_size);
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800715
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100716 uint8_t* end0 = base0 + m0.Size();
717 uint8_t* end1 = base1 + m1.Size();
718 uint8_t* end2 = base2 + m2.Size();
719 uint8_t* end3 = base3 + m3.Size();
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800720
721 ASSERT_EQ(static_cast<size_t>(end3 - base0), 14 * page_size);
722
723 if (IsAlignedParam(base0, 2 * page_size)) {
724 ASSERT_FALSE(IsAlignedParam(base1, 2 * page_size));
725 ASSERT_FALSE(IsAlignedParam(base2, 2 * page_size));
726 ASSERT_TRUE(IsAlignedParam(base3, 2 * page_size));
727 ASSERT_TRUE(IsAlignedParam(end3, 2 * page_size));
728 } else {
729 ASSERT_TRUE(IsAlignedParam(base1, 2 * page_size));
730 ASSERT_TRUE(IsAlignedParam(base2, 2 * page_size));
731 ASSERT_FALSE(IsAlignedParam(base3, 2 * page_size));
732 ASSERT_FALSE(IsAlignedParam(end3, 2 * page_size));
733 }
734
735 // Align by 2 * page_size;
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100736 m0.AlignBy(2 * page_size);
737 m1.AlignBy(2 * page_size);
738 m2.AlignBy(2 * page_size);
739 m3.AlignBy(2 * page_size);
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800740
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100741 EXPECT_TRUE(IsAlignedParam(m0.Begin(), 2 * page_size));
742 EXPECT_TRUE(IsAlignedParam(m1.Begin(), 2 * page_size));
743 EXPECT_TRUE(IsAlignedParam(m2.Begin(), 2 * page_size));
744 EXPECT_TRUE(IsAlignedParam(m3.Begin(), 2 * page_size));
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800745
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100746 EXPECT_TRUE(IsAlignedParam(m0.Begin() + m0.Size(), 2 * page_size));
747 EXPECT_TRUE(IsAlignedParam(m1.Begin() + m1.Size(), 2 * page_size));
748 EXPECT_TRUE(IsAlignedParam(m2.Begin() + m2.Size(), 2 * page_size));
749 EXPECT_TRUE(IsAlignedParam(m3.Begin() + m3.Size(), 2 * page_size));
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800750
751 if (IsAlignedParam(base0, 2 * page_size)) {
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100752 EXPECT_EQ(m0.Begin(), base0);
753 EXPECT_EQ(m0.Begin() + m0.Size(), end0 - page_size);
754 EXPECT_EQ(m1.Begin(), base1 + page_size);
755 EXPECT_EQ(m1.Begin() + m1.Size(), end1 - page_size);
756 EXPECT_EQ(m2.Begin(), base2 + page_size);
757 EXPECT_EQ(m2.Begin() + m2.Size(), end2);
758 EXPECT_EQ(m3.Begin(), base3);
759 EXPECT_EQ(m3.Begin() + m3.Size(), end3);
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800760 } else {
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100761 EXPECT_EQ(m0.Begin(), base0 + page_size);
762 EXPECT_EQ(m0.Begin() + m0.Size(), end0);
763 EXPECT_EQ(m1.Begin(), base1);
764 EXPECT_EQ(m1.Begin() + m1.Size(), end1);
765 EXPECT_EQ(m2.Begin(), base2);
766 EXPECT_EQ(m2.Begin() + m2.Size(), end2 - page_size);
767 EXPECT_EQ(m3.Begin(), base3 + page_size);
768 EXPECT_EQ(m3.Begin() + m3.Size(), end3 - page_size);
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800769 }
770}
771
Vladimir Markoc09cd052018-08-23 16:36:36 +0100772TEST_F(MemMapTest, Reservation) {
773 CommonInit();
774 std::string error_msg;
775 ScratchFile scratch_file;
776 constexpr size_t kMapSize = 5 * kPageSize;
777 std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
778 ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
779
780 MemMap reservation = MemMap::MapAnonymous("Test reservation",
Andreas Gampe0de385f2018-10-11 11:11:13 -0700781 /* addr= */ nullptr,
Vladimir Markoc09cd052018-08-23 16:36:36 +0100782 kMapSize,
783 PROT_NONE,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700784 /* low_4gb= */ false,
Vladimir Markoc09cd052018-08-23 16:36:36 +0100785 &error_msg);
786 ASSERT_TRUE(reservation.IsValid());
787 ASSERT_TRUE(error_msg.empty());
788
789 // Map first part of the reservation.
790 constexpr size_t kChunk1Size = kPageSize - 1u;
791 static_assert(kChunk1Size < kMapSize, "We want to split the reservation.");
792 uint8_t* addr1 = reservation.Begin();
793 MemMap map1 = MemMap::MapFileAtAddress(addr1,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700794 /* byte_count= */ kChunk1Size,
Vladimir Markoc09cd052018-08-23 16:36:36 +0100795 PROT_READ,
796 MAP_PRIVATE,
797 scratch_file.GetFd(),
Andreas Gampe0de385f2018-10-11 11:11:13 -0700798 /* start= */ 0,
799 /* low_4gb= */ false,
Vladimir Markoc09cd052018-08-23 16:36:36 +0100800 scratch_file.GetFilename().c_str(),
Andreas Gampe0de385f2018-10-11 11:11:13 -0700801 /* reuse= */ false,
Vladimir Markoc09cd052018-08-23 16:36:36 +0100802 &reservation,
803 &error_msg);
804 ASSERT_TRUE(map1.IsValid()) << error_msg;
805 ASSERT_TRUE(error_msg.empty());
806 ASSERT_EQ(map1.Size(), kChunk1Size);
807 ASSERT_EQ(addr1, map1.Begin());
808 ASSERT_TRUE(reservation.IsValid());
809 // Entire pages are taken from the `reservation`.
810 ASSERT_LT(map1.End(), map1.BaseEnd());
811 ASSERT_EQ(map1.BaseEnd(), reservation.Begin());
812
813 // Map second part as an anonymous mapping.
814 constexpr size_t kChunk2Size = 2 * kPageSize;
815 DCHECK_LT(kChunk2Size, reservation.Size()); // We want to split the reservation.
816 uint8_t* addr2 = reservation.Begin();
817 MemMap map2 = MemMap::MapAnonymous("MiddleReservation",
818 addr2,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700819 /* byte_count= */ kChunk2Size,
Vladimir Markoc09cd052018-08-23 16:36:36 +0100820 PROT_READ,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700821 /* low_4gb= */ false,
822 /* reuse= */ false,
Vladimir Markoc09cd052018-08-23 16:36:36 +0100823 &reservation,
824 &error_msg);
825 ASSERT_TRUE(map2.IsValid()) << error_msg;
826 ASSERT_TRUE(error_msg.empty());
827 ASSERT_EQ(map2.Size(), kChunk2Size);
828 ASSERT_EQ(addr2, map2.Begin());
829 ASSERT_EQ(map2.End(), map2.BaseEnd()); // kChunk2Size is page aligned.
830 ASSERT_EQ(map2.BaseEnd(), reservation.Begin());
831
832 // Map the rest of the reservation except the last byte.
833 const size_t kChunk3Size = reservation.Size() - 1u;
834 uint8_t* addr3 = reservation.Begin();
835 MemMap map3 = MemMap::MapFileAtAddress(addr3,
Andreas Gampe0de385f2018-10-11 11:11:13 -0700836 /* byte_count= */ kChunk3Size,
Vladimir Markoc09cd052018-08-23 16:36:36 +0100837 PROT_READ,
838 MAP_PRIVATE,
839 scratch_file.GetFd(),
Andreas Gampe0de385f2018-10-11 11:11:13 -0700840 /* start= */ dchecked_integral_cast<size_t>(addr3 - addr1),
841 /* low_4gb= */ false,
Vladimir Markoc09cd052018-08-23 16:36:36 +0100842 scratch_file.GetFilename().c_str(),
Andreas Gampe0de385f2018-10-11 11:11:13 -0700843 /* reuse= */ false,
Vladimir Markoc09cd052018-08-23 16:36:36 +0100844 &reservation,
845 &error_msg);
846 ASSERT_TRUE(map3.IsValid()) << error_msg;
847 ASSERT_TRUE(error_msg.empty());
848 ASSERT_EQ(map3.Size(), kChunk3Size);
849 ASSERT_EQ(addr3, map3.Begin());
850 // Entire pages are taken from the `reservation`, so it's now exhausted.
851 ASSERT_FALSE(reservation.IsValid());
852
853 // Now split the MiddleReservation.
854 constexpr size_t kChunk2ASize = kPageSize - 1u;
855 DCHECK_LT(kChunk2ASize, map2.Size()); // We want to split the reservation.
856 MemMap map2a = map2.TakeReservedMemory(kChunk2ASize);
857 ASSERT_TRUE(map2a.IsValid()) << error_msg;
858 ASSERT_TRUE(error_msg.empty());
859 ASSERT_EQ(map2a.Size(), kChunk2ASize);
860 ASSERT_EQ(addr2, map2a.Begin());
861 ASSERT_TRUE(map2.IsValid());
862 ASSERT_LT(map2a.End(), map2a.BaseEnd());
863 ASSERT_EQ(map2a.BaseEnd(), map2.Begin());
864
865 // And take the rest of the middle reservation.
866 const size_t kChunk2BSize = map2.Size() - 1u;
867 uint8_t* addr2b = map2.Begin();
868 MemMap map2b = map2.TakeReservedMemory(kChunk2BSize);
869 ASSERT_TRUE(map2b.IsValid()) << error_msg;
870 ASSERT_TRUE(error_msg.empty());
871 ASSERT_EQ(map2b.Size(), kChunk2ASize);
872 ASSERT_EQ(addr2b, map2b.Begin());
873 ASSERT_FALSE(map2.IsValid());
874}
875
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700876} // namespace art