blob: 074d4c2890c4e72e603aca5096e127f972699e3e [file] [log] [blame]
Brian Carlstrom9004cb62013-07-26 15:48:31 -07001/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "mem_map.h"
18
Andreas Gamped4901292017-05-30 18:41:34 -070019#include <sys/mman.h>
20
Ian Rogers700a4022014-05-19 16:49:03 -070021#include <memory>
Alex Lightca97ada2018-02-02 09:25:31 -080022#include <random>
Ian Rogers700a4022014-05-19 16:49:03 -070023
Andreas Gampec857f4a2018-10-25 13:12:37 -070024#include "common_art_test.h"
David Sehrd5f8de82018-04-27 14:12:03 -070025#include "common_runtime_test.h" // For TEST_DISABLED_FOR_MIPS
Andreas Gampec857f4a2018-10-25 13:12:37 -070026#include "logging.h"
David Sehr1979c642018-04-26 14:41:18 -070027#include "memory_tool.h"
28#include "unix_file/fd_file.h"
Brian Carlstrom9004cb62013-07-26 15:48:31 -070029
30namespace art {
31
David Sehrd5f8de82018-04-27 14:12:03 -070032class MemMapTest : public CommonArtTest {
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -070033 public:
Alex Lightca97ada2018-02-02 09:25:31 -080034 static bool IsAddressMapped(void* addr) {
35 bool res = msync(addr, 1, MS_SYNC) == 0;
36 if (!res && errno != ENOMEM) {
37 PLOG(FATAL) << "Unexpected error occurred on msync";
38 }
39 return res;
40 }
41
42 static std::vector<uint8_t> RandomData(size_t size) {
43 std::random_device rd;
44 std::uniform_int_distribution<uint8_t> dist;
45 std::vector<uint8_t> res;
46 res.resize(size);
47 for (size_t i = 0; i < size; i++) {
48 res[i] = dist(rd);
49 }
50 return res;
51 }
52
Mathieu Chartier16d29f82015-11-10 10:32:52 -080053 static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
54 // Find a valid map address and unmap it before returning.
55 std::string error_msg;
Vladimir Markoc34bebf2018-08-16 16:12:49 +010056 MemMap map = MemMap::MapAnonymous("temp",
Vladimir Markoc34bebf2018-08-16 16:12:49 +010057 size,
58 PROT_READ,
59 low_4gb,
Vladimir Markoc34bebf2018-08-16 16:12:49 +010060 &error_msg);
61 CHECK(map.IsValid());
62 return map.Begin();
Mathieu Chartier16d29f82015-11-10 10:32:52 -080063 }
64
Ian Rogersef7d42f2014-01-06 12:55:46 -080065 static void RemapAtEndTest(bool low_4gb) {
66 std::string error_msg;
67 // Cast the page size to size_t.
68 const size_t page_size = static_cast<size_t>(kPageSize);
69 // Map a two-page memory region.
Vladimir Markoc34bebf2018-08-16 16:12:49 +010070 MemMap m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
Vladimir Markoc34bebf2018-08-16 16:12:49 +010071 2 * page_size,
72 PROT_READ | PROT_WRITE,
73 low_4gb,
Vladimir Markoc34bebf2018-08-16 16:12:49 +010074 &error_msg);
Ian Rogersef7d42f2014-01-06 12:55:46 -080075 // Check its state and write to it.
Vladimir Markoc34bebf2018-08-16 16:12:49 +010076 ASSERT_TRUE(m0.IsValid());
77 uint8_t* base0 = m0.Begin();
Ian Rogersef7d42f2014-01-06 12:55:46 -080078 ASSERT_TRUE(base0 != nullptr) << error_msg;
Vladimir Markoc34bebf2018-08-16 16:12:49 +010079 size_t size0 = m0.Size();
80 EXPECT_EQ(m0.Size(), 2 * page_size);
81 EXPECT_EQ(m0.BaseBegin(), base0);
82 EXPECT_EQ(m0.BaseSize(), size0);
Ian Rogersef7d42f2014-01-06 12:55:46 -080083 memset(base0, 42, 2 * page_size);
84 // Remap the latter half into a second MemMap.
Vladimir Markoc34bebf2018-08-16 16:12:49 +010085 MemMap m1 = m0.RemapAtEnd(base0 + page_size,
86 "MemMapTest_RemapAtEndTest_map1",
87 PROT_READ | PROT_WRITE,
88 &error_msg);
Ian Rogersef7d42f2014-01-06 12:55:46 -080089 // Check the states of the two maps.
Vladimir Markoc34bebf2018-08-16 16:12:49 +010090 EXPECT_EQ(m0.Begin(), base0) << error_msg;
91 EXPECT_EQ(m0.Size(), page_size);
92 EXPECT_EQ(m0.BaseBegin(), base0);
93 EXPECT_EQ(m0.BaseSize(), page_size);
94 uint8_t* base1 = m1.Begin();
95 size_t size1 = m1.Size();
Ian Rogersef7d42f2014-01-06 12:55:46 -080096 EXPECT_EQ(base1, base0 + page_size);
97 EXPECT_EQ(size1, page_size);
Vladimir Markoc34bebf2018-08-16 16:12:49 +010098 EXPECT_EQ(m1.BaseBegin(), base1);
99 EXPECT_EQ(m1.BaseSize(), size1);
Ian Rogersef7d42f2014-01-06 12:55:46 -0800100 // Write to the second region.
101 memset(base1, 43, page_size);
102 // Check the contents of the two regions.
103 for (size_t i = 0; i < page_size; ++i) {
104 EXPECT_EQ(base0[i], 42);
105 }
106 for (size_t i = 0; i < page_size; ++i) {
107 EXPECT_EQ(base1[i], 43);
108 }
109 // Unmap the first region.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100110 m0.Reset();
Ian Rogersef7d42f2014-01-06 12:55:46 -0800111 // Make sure the second region is still accessible after the first
112 // region is unmapped.
113 for (size_t i = 0; i < page_size; ++i) {
114 EXPECT_EQ(base1[i], 43);
115 }
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100116 MemMap m2 = m1.RemapAtEnd(m1.Begin(),
117 "MemMapTest_RemapAtEndTest_map1",
118 PROT_READ | PROT_WRITE,
119 &error_msg);
120 ASSERT_TRUE(m2.IsValid()) << error_msg;
121 ASSERT_FALSE(m1.IsValid());
Ian Rogersef7d42f2014-01-06 12:55:46 -0800122 }
Andreas Gamped8f26db2014-05-19 17:01:13 -0700123
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700124 void CommonInit() {
125 MemMap::Init();
126 }
127
Andreas Gamped8f26db2014-05-19 17:01:13 -0700128#if defined(__LP64__) && !defined(__x86_64__)
129 static uintptr_t GetLinearScanPos() {
130 return MemMap::next_mem_pos_;
131 }
132#endif
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700133};
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700134
Andreas Gamped8f26db2014-05-19 17:01:13 -0700135#if defined(__LP64__) && !defined(__x86_64__)
136
137#ifdef __BIONIC__
138extern uintptr_t CreateStartPos(uint64_t input);
139#endif
140
// Checks the initial linear-scan position: at least 64 KB (to avoid null-deref
// protection pages) and below ART_BASE_ADDRESS. On Bionic, also checks that
// the randomized start position varies with its input and stays bounded.
TEST_F(MemMapTest, Start) {
  CommonInit();
  uintptr_t start = GetLinearScanPos();
  EXPECT_LE(64 * KB, start);
  EXPECT_LT(start, static_cast<uintptr_t>(ART_BASE_ADDRESS));
#ifdef __BIONIC__
  // Test a couple of values. Make sure they are different.
  uintptr_t last = 0;
  for (size_t i = 0; i < 100; ++i) {
    uintptr_t random_start = CreateStartPos(i * kPageSize);
    EXPECT_NE(last, random_start);
    last = random_start;
  }

  // Even on max, should be below ART_BASE_ADDRESS.
  EXPECT_LT(CreateStartPos(~0), static_cast<uintptr_t>(ART_BASE_ADDRESS));
#endif
  // End of test.
}
160#endif
161
Alex Lightca97ada2018-02-02 09:25:31 -0800162// We need mremap to be able to test ReplaceMapping at all
163#if HAVE_MREMAP_SYSCALL
// ReplaceWith() with equal sizes: the source mapping must move over the dest
// address, the old source address must become unmapped, and the data must
// arrive intact at the dest address.
TEST_F(MemMapTest, ReplaceMapping_SameSize) {
  std::string error_msg;
  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                     kPageSize,
                                     PROT_READ,
                                     /*low_4gb=*/ false,
                                     &error_msg);
  ASSERT_TRUE(dest.IsValid());
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       kPageSize,
                                       PROT_WRITE | PROT_READ,
                                       /*low_4gb=*/ false,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  // Remember the raw addresses; the MemMap objects forget them after the move.
  void* source_addr = source.Begin();
  void* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));

  // Random content lets us verify the bytes actually moved.
  std::vector<uint8_t> data = RandomData(kPageSize);
  memcpy(source.Begin(), data.data(), data.size());

  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  // Source pages moved away; dest address still mapped; source emptied.
  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(source.IsValid());

  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));

  // Contents survived the move.
  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}
196
// ReplaceWith() where the source is larger than the (shrunken) dest: the dest
// must grow to the source's size and receive all of its data.
TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
  std::string error_msg;
  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                     5 * kPageSize,  // Need to make it larger
                                                     // initially so we know
                                                     // there won't be mappings
                                                     // in the way when we move
                                                     // source.
                                     PROT_READ,
                                     /*low_4gb=*/ false,
                                     &error_msg);
  ASSERT_TRUE(dest.IsValid());
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       3 * kPageSize,
                                       PROT_WRITE | PROT_READ,
                                       /*low_4gb=*/ false,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  uint8_t* source_addr = source.Begin();
  uint8_t* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));

  // Fill the source with random data.
  std::vector<uint8_t> data = RandomData(3 * kPageSize);
  memcpy(source.Begin(), data.data(), data.size());

  // Make the dest smaller so that we know we'll have space.
  dest.SetSize(kPageSize);

  // SetSize() unmapped pages beyond the first.
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));

  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  // Dest grew to 3 pages, source is gone, data arrived intact.
  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * kPageSize));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_FALSE(source.IsValid());

  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}
240
// ReplaceWith() where the source is smaller than the dest: the dest must
// shrink to the source's size, and its now-excess pages must be unmapped.
TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
  std::string error_msg;
  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
                                     3 * kPageSize,
                                     PROT_READ,
                                     /*low_4gb=*/ false,
                                     &error_msg);
  ASSERT_TRUE(dest.IsValid());
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       kPageSize,
                                       PROT_WRITE | PROT_READ,
                                       /*low_4gb=*/ false,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  uint8_t* source_addr = source.Begin();
  uint8_t* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * kPageSize));

  std::vector<uint8_t> data = RandomData(kPageSize);
  memcpy(source.Begin(), data.data(), kPageSize);

  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  // Dest shrank to one page; its former third page is no longer mapped.
  ASSERT_FALSE(IsAddressMapped(source_addr));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
  ASSERT_FALSE(source.IsValid());

  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
}
275
// ReplaceWith() must fail when source and dest overlap, and on failure both
// mappings must remain intact with their original sizes and contents.
TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
  std::string error_msg;
  MemMap dest =
      MemMap::MapAnonymous(
          "MapAnonymousEmpty-atomic-replace-dest",
          3 * kPageSize,  // Need to make it larger initially so we know there won't be mappings in
                          // the way when we move source.
          PROT_READ | PROT_WRITE,
          /*low_4gb=*/ false,
          &error_msg);
  ASSERT_TRUE(dest.IsValid());
  // Resize down to 1 page so we can remap the rest.
  dest.SetSize(kPageSize);
  // Create source from the last 2 pages, deliberately adjacent/overlapping
  // with dest's original reservation.
  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
                                       dest.Begin() + kPageSize,
                                       2 * kPageSize,
                                       PROT_WRITE | PROT_READ,
                                       /*low_4gb=*/ false,
                                       /*reuse=*/ false,
                                       /*reservation=*/ nullptr,
                                       &error_msg);
  ASSERT_TRUE(source.IsValid());
  ASSERT_EQ(dest.Begin() + kPageSize, source.Begin());
  uint8_t* source_addr = source.Begin();
  uint8_t* dest_addr = dest.Begin();
  ASSERT_TRUE(IsAddressMapped(source_addr));

  // Fill the source and dest with random data.
  std::vector<uint8_t> data = RandomData(2 * kPageSize);
  memcpy(source.Begin(), data.data(), data.size());
  std::vector<uint8_t> dest_data = RandomData(kPageSize);
  memcpy(dest.Begin(), dest_data.data(), dest_data.size());

  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));

  // The overlapping replace must be rejected.
  ASSERT_FALSE(dest.ReplaceWith(&source, &error_msg)) << error_msg;

  // Nothing was torn down: both maps still mapped, same sizes, same bytes.
  ASSERT_TRUE(IsAddressMapped(source_addr));
  ASSERT_TRUE(IsAddressMapped(dest_addr));
  ASSERT_EQ(source.Size(), data.size());
  ASSERT_EQ(dest.Size(), dest_data.size());

  ASSERT_EQ(memcmp(source.Begin(), data.data(), data.size()), 0);
  ASSERT_EQ(memcmp(dest.Begin(), dest_data.data(), dest_data.size()), 0);
}
323#endif // HAVE_MREMAP_SYSCALL
324
// A zero-byte anonymous mapping must fail and report an error; a normal
// one-page mapping must succeed with no error.
TEST_F(MemMapTest, MapAnonymousEmpty) {
  CommonInit();
  std::string error_msg;
  // Requesting an empty (0-byte) mapping is invalid.
  MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
                                    /*byte_count=*/ 0,
                                    PROT_READ,
                                    /*low_4gb=*/ false,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid()) << error_msg;
  ASSERT_FALSE(error_msg.empty());

  error_msg.clear();
  // A normal one-page request should succeed and leave error_msg empty.
  map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
                             kPageSize,
                             PROT_READ | PROT_WRITE,
                             /*low_4gb=*/ false,
                             &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
}
345
// Passing a null error_msg pointer must not crash even when the mapping
// request fails (here: an intentionally invalid fixed address).
TEST_F(MemMapTest, MapAnonymousFailNullError) {
  CommonInit();
  // Test that we don't crash with a null error_str when mapping at an invalid location.
  MemMap map = MemMap::MapAnonymous("MapAnonymousInvalid",
                                    reinterpret_cast<uint8_t*>(kPageSize),
                                    0x20000,
                                    PROT_READ | PROT_WRITE,
                                    /*low_4gb=*/ false,
                                    /*reuse=*/ false,
                                    /*reservation=*/ nullptr,
                                    nullptr);
  ASSERT_FALSE(map.IsValid());
}
359
Ian Rogersef7d42f2014-01-06 12:55:46 -0800360#ifdef __LP64__
// Same as MapAnonymousEmpty, but with low_4gb=true: the successful mapping
// must additionally land entirely below the 4 GB boundary.
TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
  CommonInit();
  std::string error_msg;
  // Zero-byte request fails even in low-4GB mode.
  MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
                                    /*byte_count=*/ 0,
                                    PROT_READ,
                                    /*low_4gb=*/ true,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid()) << error_msg;
  ASSERT_FALSE(error_msg.empty());

  error_msg.clear();
  map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
                             kPageSize,
                             PROT_READ | PROT_WRITE,
                             /*low_4gb=*/ true,
                             &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  // low_4gb must be honored.
  ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
}
// MapFile() with low_4gb=true: a one-page file-backed mapping must succeed,
// have the requested size, and land below the 4 GB boundary.
TEST_F(MemMapTest, MapFile32Bit) {
  CommonInit();
  std::string error_msg;
  ScratchFile scratch_file;
  // Write one page of zeroed data for the file to back the mapping.
  constexpr size_t kMapSize = kPageSize;
  std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
  ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
  MemMap map = MemMap::MapFile(/*byte_count=*/kMapSize,
                               PROT_READ,
                               MAP_PRIVATE,
                               scratch_file.GetFd(),
                               /*start=*/0,
                               /*low_4gb=*/true,
                               scratch_file.GetFilename().c_str(),
                               &error_msg);
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map.Size(), kMapSize);
  ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
}
Ian Rogersef7d42f2014-01-06 12:55:46 -0800402#endif
403
// MapAnonymous() with an explicit address: succeeds at a known-free address,
// succeeds at an unspecified address, and fails when the requested address is
// already occupied.
TEST_F(MemMapTest, MapAnonymousExactAddr) {
  // TODO: The semantics of the MemMap::MapAnonymous() with a given address but without
  // `reuse == true` or `reservation != nullptr` is weird. We should either drop support
  // for it, or take it only as a hint and allow the result to be mapped elsewhere.
  // Currently we're seeing failures with ASAN. b/118408378
  TEST_DISABLED_FOR_MEMORY_TOOL();

  CommonInit();
  std::string error_msg;
  // Find a valid address.
  uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb=*/false);
  // Map at an address that should work, which should succeed.
  MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
                                     valid_address,
                                     kPageSize,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     /*reuse=*/ false,
                                     /*reservation=*/ nullptr,
                                     &error_msg);
  ASSERT_TRUE(map0.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map0.BaseBegin() == valid_address);
  // Map at an unspecified address, which should succeed.
  MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
                                     kPageSize,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     &error_msg);
  ASSERT_TRUE(map1.IsValid()) << error_msg;
  ASSERT_TRUE(error_msg.empty());
  ASSERT_TRUE(map1.BaseBegin() != nullptr);
  // Attempt to map at the same address, which should fail.
  MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
                                     reinterpret_cast<uint8_t*>(map1.BaseBegin()),
                                     kPageSize,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     /*reuse=*/ false,
                                     /*reservation=*/ nullptr,
                                     &error_msg);
  ASSERT_FALSE(map2.IsValid()) << error_msg;
  ASSERT_TRUE(!error_msg.empty());
}
448
// RemapAtEnd() behavior for an ordinary (anywhere-in-address-space) mapping;
// the shared logic lives in the fixture's RemapAtEndTest().
TEST_F(MemMapTest, RemapAtEnd) {
  RemapAtEndTest(false);
}
452
453#ifdef __LP64__
// Same RemapAtEnd() scenario, but with the mapping constrained to low 4 GB
// (only meaningful on 64-bit builds; see the surrounding #ifdef __LP64__).
TEST_F(MemMapTest, RemapAtEnd32bit) {
  RemapAtEndTest(true);
}
457#endif
Hiroshi Yamauchifd7e7f12013-10-22 14:17:48 -0700458
Orion Hodson1d3fd082018-09-28 09:38:35 +0100459TEST_F(MemMapTest, RemapFileViewAtEnd) {
460 CommonInit();
461 std::string error_msg;
462 ScratchFile scratch_file;
463
464 // Create a scratch file 3 pages large.
465 constexpr size_t kMapSize = 3 * kPageSize;
466 std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
467 memset(data.get(), 1, kPageSize);
468 memset(&data[0], 0x55, kPageSize);
469 memset(&data[kPageSize], 0x5a, kPageSize);
470 memset(&data[2 * kPageSize], 0xaa, kPageSize);
471 ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
472
Andreas Gampe0de385f2018-10-11 11:11:13 -0700473 MemMap map = MemMap::MapFile(/*byte_count=*/kMapSize,
Orion Hodson1d3fd082018-09-28 09:38:35 +0100474 PROT_READ,
475 MAP_PRIVATE,
476 scratch_file.GetFd(),
Andreas Gampe0de385f2018-10-11 11:11:13 -0700477 /*start=*/0,
478 /*low_4gb=*/true,
Orion Hodson1d3fd082018-09-28 09:38:35 +0100479 scratch_file.GetFilename().c_str(),
480 &error_msg);
481 ASSERT_TRUE(map.IsValid()) << error_msg;
482 ASSERT_TRUE(error_msg.empty());
483 ASSERT_EQ(map.Size(), kMapSize);
484 ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
485 ASSERT_EQ(data[0], *map.Begin());
486 ASSERT_EQ(data[kPageSize], *(map.Begin() + kPageSize));
487 ASSERT_EQ(data[2 * kPageSize], *(map.Begin() + 2 * kPageSize));
488
489 for (size_t offset = 2 * kPageSize; offset > 0; offset -= kPageSize) {
490 MemMap tail = map.RemapAtEnd(map.Begin() + offset,
491 "bad_offset_map",
492 PROT_READ,
493 MAP_PRIVATE | MAP_FIXED,
494 scratch_file.GetFd(),
495 offset,
496 &error_msg);
497 ASSERT_TRUE(tail.IsValid()) << error_msg;
498 ASSERT_TRUE(error_msg.empty());
499 ASSERT_EQ(offset, map.Size());
500 ASSERT_EQ(static_cast<size_t>(kPageSize), tail.Size());
501 ASSERT_EQ(tail.Begin(), map.Begin() + map.Size());
502 ASSERT_EQ(data[offset], *tail.Begin());
503 }
504}
505
// Scans fixed addresses from 2 GB upward until a low-4GB mapping succeeds,
// verifying that MapAnonymous() can place memory in the 2GB-4GB range.
TEST_F(MemMapTest, MapAnonymousExactAddr32bitHighAddr) {
  // Some MIPS32 hardware (namely the Creator Ci20 development board)
  // cannot allocate in the 2GB-4GB region.
  TEST_DISABLED_FOR_MIPS();

  // This test does not work under AddressSanitizer.
  // Historical note: This test did not work under Valgrind either.
  TEST_DISABLED_FOR_MEMORY_TOOL();

  CommonInit();
  constexpr size_t size = 0x100000;
  // Try all addresses starting from 2GB to 4GB.
  size_t start_addr = 2 * GB;
  std::string error_msg;
  MemMap map;
  for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
    map = MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
                               reinterpret_cast<uint8_t*>(start_addr),
                               size,
                               PROT_READ | PROT_WRITE,
                               /*low_4gb=*/ true,
                               /*reuse=*/ false,
                               /*reservation=*/ nullptr,
                               &error_msg);
    if (map.IsValid()) {
      break;
    }
  }
  // At least one high address must have worked, and the result must honor
  // the exact requested start address.
  ASSERT_TRUE(map.IsValid()) << error_msg;
  ASSERT_GE(reinterpret_cast<uintptr_t>(map.End()), 2u * GB);
  ASSERT_TRUE(error_msg.empty());
  ASSERT_EQ(map.BaseBegin(), reinterpret_cast<void*>(start_addr));
}
539
// A mapping whose address + size would wrap past the top of the address
// space must fail with an error message rather than overflow.
TEST_F(MemMapTest, MapAnonymousOverflow) {
  CommonInit();
  std::string error_msg;
  uintptr_t ptr = 0;
  ptr -= kPageSize;  // Now it's close to the top.
  MemMap map = MemMap::MapAnonymous("MapAnonymousOverflow",
                                    reinterpret_cast<uint8_t*>(ptr),
                                    2 * kPageSize,  // brings it over the top.
                                    PROT_READ | PROT_WRITE,
                                    /*low_4gb=*/ false,
                                    /*reuse=*/ false,
                                    /*reservation=*/ nullptr,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid());
  ASSERT_FALSE(error_msg.empty());
}
556
557#ifdef __LP64__
// With low_4gb=true, a requested address at exactly 4 GB is out of range and
// must be rejected with an error message.
TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
  CommonInit();
  std::string error_msg;
  MemMap map =
      MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
                           reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
                           kPageSize,
                           PROT_READ | PROT_WRITE,
                           /*low_4gb=*/ true,
                           /*reuse=*/ false,
                           /*reservation=*/ nullptr,
                           &error_msg);
  ASSERT_FALSE(map.IsValid());
  ASSERT_FALSE(error_msg.empty());
}
573
// With low_4gb=true, a range that starts below 4 GB but extends past it
// (0xF0000000 + 0x20000000) must be rejected with an error message.
TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
  CommonInit();
  std::string error_msg;
  MemMap map = MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
                                    /*addr=*/ reinterpret_cast<uint8_t*>(0xF0000000),
                                    /*byte_count=*/ 0x20000000,
                                    PROT_READ | PROT_WRITE,
                                    /*low_4gb=*/ true,
                                    /*reuse=*/ false,
                                    /*reservation=*/ nullptr,
                                    &error_msg);
  ASSERT_FALSE(map.IsValid());
  ASSERT_FALSE(error_msg.empty());
}
588#endif
589
// With reuse=true, a second mapping may be placed on top of an existing one
// at the same base address without error.
TEST_F(MemMapTest, MapAnonymousReuse) {
  CommonInit();
  std::string error_msg;
  MemMap map = MemMap::MapAnonymous("MapAnonymousReserve",
                                    /*byte_count=*/ 0x20000,
                                    PROT_READ | PROT_WRITE,
                                    /*low_4gb=*/ false,
                                    &error_msg);
  ASSERT_TRUE(map.IsValid());
  ASSERT_TRUE(error_msg.empty());
  // Overlay part of the first mapping; reuse=true makes this legal.
  MemMap map2 = MemMap::MapAnonymous("MapAnonymousReused",
                                     /*addr=*/ reinterpret_cast<uint8_t*>(map.BaseBegin()),
                                     /*byte_count=*/ 0x10000,
                                     PROT_READ | PROT_WRITE,
                                     /*low_4gb=*/ false,
                                     /*reuse=*/ true,
                                     /*reservation=*/ nullptr,
                                     &error_msg);
  ASSERT_TRUE(map2.IsValid());
  ASSERT_TRUE(error_msg.empty());
}
611
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700612TEST_F(MemMapTest, CheckNoGaps) {
Mathieu Chartier6e88ef62014-10-14 15:01:24 -0700613 CommonInit();
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700614 std::string error_msg;
615 constexpr size_t kNumPages = 3;
616 // Map a 3-page mem map.
Vladimir Marko11306592018-10-26 14:22:59 +0100617 MemMap reservation = MemMap::MapAnonymous("MapAnonymous0",
618 kPageSize * kNumPages,
619 PROT_READ | PROT_WRITE,
620 /*low_4gb=*/ false,
621 &error_msg);
622 ASSERT_TRUE(reservation.IsValid()) << error_msg;
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700623 ASSERT_TRUE(error_msg.empty());
624 // Record the base address.
Vladimir Marko11306592018-10-26 14:22:59 +0100625 uint8_t* map_base = reinterpret_cast<uint8_t*>(reservation.BaseBegin());
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700626
Vladimir Marko11306592018-10-26 14:22:59 +0100627 // Map at the same address, taking from the `map` reservation.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100628 MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100629 kPageSize,
630 PROT_READ | PROT_WRITE,
Vladimir Marko11306592018-10-26 14:22:59 +0100631 /*low_4gb=*/ false,
632 &reservation,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100633 &error_msg);
634 ASSERT_TRUE(map0.IsValid()) << error_msg;
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700635 ASSERT_TRUE(error_msg.empty());
Vladimir Marko11306592018-10-26 14:22:59 +0100636 ASSERT_EQ(map_base, map0.Begin());
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100637 MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100638 kPageSize,
639 PROT_READ | PROT_WRITE,
Vladimir Marko11306592018-10-26 14:22:59 +0100640 /*low_4gb=*/ false,
641 &reservation,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100642 &error_msg);
643 ASSERT_TRUE(map1.IsValid()) << error_msg;
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700644 ASSERT_TRUE(error_msg.empty());
Vladimir Marko11306592018-10-26 14:22:59 +0100645 ASSERT_EQ(map_base + kPageSize, map1.Begin());
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100646 MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100647 kPageSize,
648 PROT_READ | PROT_WRITE,
Vladimir Marko11306592018-10-26 14:22:59 +0100649 /*low_4gb=*/ false,
650 &reservation,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100651 &error_msg);
652 ASSERT_TRUE(map2.IsValid()) << error_msg;
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700653 ASSERT_TRUE(error_msg.empty());
Vladimir Marko11306592018-10-26 14:22:59 +0100654 ASSERT_EQ(map_base + 2 * kPageSize, map2.Begin());
655 ASSERT_FALSE(reservation.IsValid()); // The entire reservation was used.
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700656
657 // One-map cases.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100658 ASSERT_TRUE(MemMap::CheckNoGaps(map0, map0));
659 ASSERT_TRUE(MemMap::CheckNoGaps(map1, map1));
660 ASSERT_TRUE(MemMap::CheckNoGaps(map2, map2));
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700661
662 // Two or three-map cases.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100663 ASSERT_TRUE(MemMap::CheckNoGaps(map0, map1));
664 ASSERT_TRUE(MemMap::CheckNoGaps(map1, map2));
665 ASSERT_TRUE(MemMap::CheckNoGaps(map0, map2));
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700666
667 // Unmap the middle one.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100668 map1.Reset();
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700669
670 // Should return false now that there's a gap in the middle.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100671 ASSERT_FALSE(MemMap::CheckNoGaps(map0, map2));
Hiroshi Yamauchi3eed93d2014-06-04 11:43:59 -0700672}
673
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800674TEST_F(MemMapTest, AlignBy) {
675 CommonInit();
676 std::string error_msg;
677 // Cast the page size to size_t.
678 const size_t page_size = static_cast<size_t>(kPageSize);
679 // Map a region.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100680 MemMap m0 = MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100681 14 * page_size,
682 PROT_READ | PROT_WRITE,
Vladimir Marko11306592018-10-26 14:22:59 +0100683 /*low_4gb=*/ false,
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100684 &error_msg);
685 ASSERT_TRUE(m0.IsValid());
686 uint8_t* base0 = m0.Begin();
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800687 ASSERT_TRUE(base0 != nullptr) << error_msg;
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100688 ASSERT_EQ(m0.Size(), 14 * page_size);
689 ASSERT_EQ(m0.BaseBegin(), base0);
690 ASSERT_EQ(m0.BaseSize(), m0.Size());
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800691
692 // Break it into several regions by using RemapAtEnd.
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100693 MemMap m1 = m0.RemapAtEnd(base0 + 3 * page_size,
694 "MemMapTest_AlignByTest_map1",
695 PROT_READ | PROT_WRITE,
696 &error_msg);
697 uint8_t* base1 = m1.Begin();
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800698 ASSERT_TRUE(base1 != nullptr) << error_msg;
699 ASSERT_EQ(base1, base0 + 3 * page_size);
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100700 ASSERT_EQ(m0.Size(), 3 * page_size);
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800701
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100702 MemMap m2 = m1.RemapAtEnd(base1 + 4 * page_size,
703 "MemMapTest_AlignByTest_map2",
704 PROT_READ | PROT_WRITE,
705 &error_msg);
706 uint8_t* base2 = m2.Begin();
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800707 ASSERT_TRUE(base2 != nullptr) << error_msg;
708 ASSERT_EQ(base2, base1 + 4 * page_size);
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100709 ASSERT_EQ(m1.Size(), 4 * page_size);
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800710
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100711 MemMap m3 = m2.RemapAtEnd(base2 + 3 * page_size,
712 "MemMapTest_AlignByTest_map1",
713 PROT_READ | PROT_WRITE,
714 &error_msg);
715 uint8_t* base3 = m3.Begin();
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800716 ASSERT_TRUE(base3 != nullptr) << error_msg;
717 ASSERT_EQ(base3, base2 + 3 * page_size);
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100718 ASSERT_EQ(m2.Size(), 3 * page_size);
719 ASSERT_EQ(m3.Size(), 4 * page_size);
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800720
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100721 uint8_t* end0 = base0 + m0.Size();
722 uint8_t* end1 = base1 + m1.Size();
723 uint8_t* end2 = base2 + m2.Size();
724 uint8_t* end3 = base3 + m3.Size();
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800725
726 ASSERT_EQ(static_cast<size_t>(end3 - base0), 14 * page_size);
727
728 if (IsAlignedParam(base0, 2 * page_size)) {
729 ASSERT_FALSE(IsAlignedParam(base1, 2 * page_size));
730 ASSERT_FALSE(IsAlignedParam(base2, 2 * page_size));
731 ASSERT_TRUE(IsAlignedParam(base3, 2 * page_size));
732 ASSERT_TRUE(IsAlignedParam(end3, 2 * page_size));
733 } else {
734 ASSERT_TRUE(IsAlignedParam(base1, 2 * page_size));
735 ASSERT_TRUE(IsAlignedParam(base2, 2 * page_size));
736 ASSERT_FALSE(IsAlignedParam(base3, 2 * page_size));
737 ASSERT_FALSE(IsAlignedParam(end3, 2 * page_size));
738 }
739
740 // Align by 2 * page_size;
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100741 m0.AlignBy(2 * page_size);
742 m1.AlignBy(2 * page_size);
743 m2.AlignBy(2 * page_size);
744 m3.AlignBy(2 * page_size);
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800745
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100746 EXPECT_TRUE(IsAlignedParam(m0.Begin(), 2 * page_size));
747 EXPECT_TRUE(IsAlignedParam(m1.Begin(), 2 * page_size));
748 EXPECT_TRUE(IsAlignedParam(m2.Begin(), 2 * page_size));
749 EXPECT_TRUE(IsAlignedParam(m3.Begin(), 2 * page_size));
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800750
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100751 EXPECT_TRUE(IsAlignedParam(m0.Begin() + m0.Size(), 2 * page_size));
752 EXPECT_TRUE(IsAlignedParam(m1.Begin() + m1.Size(), 2 * page_size));
753 EXPECT_TRUE(IsAlignedParam(m2.Begin() + m2.Size(), 2 * page_size));
754 EXPECT_TRUE(IsAlignedParam(m3.Begin() + m3.Size(), 2 * page_size));
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800755
756 if (IsAlignedParam(base0, 2 * page_size)) {
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100757 EXPECT_EQ(m0.Begin(), base0);
758 EXPECT_EQ(m0.Begin() + m0.Size(), end0 - page_size);
759 EXPECT_EQ(m1.Begin(), base1 + page_size);
760 EXPECT_EQ(m1.Begin() + m1.Size(), end1 - page_size);
761 EXPECT_EQ(m2.Begin(), base2 + page_size);
762 EXPECT_EQ(m2.Begin() + m2.Size(), end2);
763 EXPECT_EQ(m3.Begin(), base3);
764 EXPECT_EQ(m3.Begin() + m3.Size(), end3);
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800765 } else {
Vladimir Markoc34bebf2018-08-16 16:12:49 +0100766 EXPECT_EQ(m0.Begin(), base0 + page_size);
767 EXPECT_EQ(m0.Begin() + m0.Size(), end0);
768 EXPECT_EQ(m1.Begin(), base1);
769 EXPECT_EQ(m1.Begin() + m1.Size(), end1);
770 EXPECT_EQ(m2.Begin(), base2);
771 EXPECT_EQ(m2.Begin() + m2.Size(), end2 - page_size);
772 EXPECT_EQ(m3.Begin(), base3 + page_size);
773 EXPECT_EQ(m3.Begin() + m3.Size(), end3 - page_size);
Hiroshi Yamauchi3c3c4a12017-02-21 16:49:59 -0800774 }
775}
776
Vladimir Markoc09cd052018-08-23 16:36:36 +0100777TEST_F(MemMapTest, Reservation) {
778 CommonInit();
779 std::string error_msg;
780 ScratchFile scratch_file;
781 constexpr size_t kMapSize = 5 * kPageSize;
782 std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
783 ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
784
785 MemMap reservation = MemMap::MapAnonymous("Test reservation",
Vladimir Markoc09cd052018-08-23 16:36:36 +0100786 kMapSize,
787 PROT_NONE,
Vladimir Marko11306592018-10-26 14:22:59 +0100788 /*low_4gb=*/ false,
Vladimir Markoc09cd052018-08-23 16:36:36 +0100789 &error_msg);
790 ASSERT_TRUE(reservation.IsValid());
791 ASSERT_TRUE(error_msg.empty());
792
793 // Map first part of the reservation.
794 constexpr size_t kChunk1Size = kPageSize - 1u;
795 static_assert(kChunk1Size < kMapSize, "We want to split the reservation.");
796 uint8_t* addr1 = reservation.Begin();
797 MemMap map1 = MemMap::MapFileAtAddress(addr1,
Vladimir Marko11306592018-10-26 14:22:59 +0100798 /*byte_count=*/ kChunk1Size,
Vladimir Markoc09cd052018-08-23 16:36:36 +0100799 PROT_READ,
800 MAP_PRIVATE,
801 scratch_file.GetFd(),
Vladimir Marko11306592018-10-26 14:22:59 +0100802 /*start=*/ 0,
803 /*low_4gb=*/ false,
Vladimir Markoc09cd052018-08-23 16:36:36 +0100804 scratch_file.GetFilename().c_str(),
Vladimir Marko11306592018-10-26 14:22:59 +0100805 /*reuse=*/ false,
Vladimir Markoc09cd052018-08-23 16:36:36 +0100806 &reservation,
807 &error_msg);
808 ASSERT_TRUE(map1.IsValid()) << error_msg;
809 ASSERT_TRUE(error_msg.empty());
810 ASSERT_EQ(map1.Size(), kChunk1Size);
811 ASSERT_EQ(addr1, map1.Begin());
812 ASSERT_TRUE(reservation.IsValid());
813 // Entire pages are taken from the `reservation`.
814 ASSERT_LT(map1.End(), map1.BaseEnd());
815 ASSERT_EQ(map1.BaseEnd(), reservation.Begin());
816
817 // Map second part as an anonymous mapping.
818 constexpr size_t kChunk2Size = 2 * kPageSize;
819 DCHECK_LT(kChunk2Size, reservation.Size()); // We want to split the reservation.
820 uint8_t* addr2 = reservation.Begin();
821 MemMap map2 = MemMap::MapAnonymous("MiddleReservation",
822 addr2,
Vladimir Marko11306592018-10-26 14:22:59 +0100823 /*byte_count=*/ kChunk2Size,
Vladimir Markoc09cd052018-08-23 16:36:36 +0100824 PROT_READ,
Vladimir Marko11306592018-10-26 14:22:59 +0100825 /*low_4gb=*/ false,
826 /*reuse=*/ false,
Vladimir Markoc09cd052018-08-23 16:36:36 +0100827 &reservation,
828 &error_msg);
829 ASSERT_TRUE(map2.IsValid()) << error_msg;
830 ASSERT_TRUE(error_msg.empty());
831 ASSERT_EQ(map2.Size(), kChunk2Size);
832 ASSERT_EQ(addr2, map2.Begin());
833 ASSERT_EQ(map2.End(), map2.BaseEnd()); // kChunk2Size is page aligned.
834 ASSERT_EQ(map2.BaseEnd(), reservation.Begin());
835
836 // Map the rest of the reservation except the last byte.
837 const size_t kChunk3Size = reservation.Size() - 1u;
838 uint8_t* addr3 = reservation.Begin();
839 MemMap map3 = MemMap::MapFileAtAddress(addr3,
Vladimir Marko11306592018-10-26 14:22:59 +0100840 /*byte_count=*/ kChunk3Size,
Vladimir Markoc09cd052018-08-23 16:36:36 +0100841 PROT_READ,
842 MAP_PRIVATE,
843 scratch_file.GetFd(),
Vladimir Marko11306592018-10-26 14:22:59 +0100844 /*start=*/ dchecked_integral_cast<size_t>(addr3 - addr1),
845 /*low_4gb=*/ false,
Vladimir Markoc09cd052018-08-23 16:36:36 +0100846 scratch_file.GetFilename().c_str(),
Vladimir Marko11306592018-10-26 14:22:59 +0100847 /*reuse=*/ false,
Vladimir Markoc09cd052018-08-23 16:36:36 +0100848 &reservation,
849 &error_msg);
850 ASSERT_TRUE(map3.IsValid()) << error_msg;
851 ASSERT_TRUE(error_msg.empty());
852 ASSERT_EQ(map3.Size(), kChunk3Size);
853 ASSERT_EQ(addr3, map3.Begin());
854 // Entire pages are taken from the `reservation`, so it's now exhausted.
855 ASSERT_FALSE(reservation.IsValid());
856
857 // Now split the MiddleReservation.
858 constexpr size_t kChunk2ASize = kPageSize - 1u;
859 DCHECK_LT(kChunk2ASize, map2.Size()); // We want to split the reservation.
860 MemMap map2a = map2.TakeReservedMemory(kChunk2ASize);
861 ASSERT_TRUE(map2a.IsValid()) << error_msg;
862 ASSERT_TRUE(error_msg.empty());
863 ASSERT_EQ(map2a.Size(), kChunk2ASize);
864 ASSERT_EQ(addr2, map2a.Begin());
865 ASSERT_TRUE(map2.IsValid());
866 ASSERT_LT(map2a.End(), map2a.BaseEnd());
867 ASSERT_EQ(map2a.BaseEnd(), map2.Begin());
868
869 // And take the rest of the middle reservation.
870 const size_t kChunk2BSize = map2.Size() - 1u;
871 uint8_t* addr2b = map2.Begin();
872 MemMap map2b = map2.TakeReservedMemory(kChunk2BSize);
873 ASSERT_TRUE(map2b.IsValid()) << error_msg;
874 ASSERT_TRUE(error_msg.empty());
875 ASSERT_EQ(map2b.Size(), kChunk2ASize);
876 ASSERT_EQ(addr2b, map2b.Begin());
877 ASSERT_FALSE(map2.IsValid());
878}
879
Brian Carlstrom9004cb62013-07-26 15:48:31 -0700880} // namespace art
Andreas Gampec857f4a2018-10-25 13:12:37 -0700881
882namespace {
883
884class DumpMapsOnFailListener : public testing::EmptyTestEventListener {
885 void OnTestPartResult(const testing::TestPartResult& result) override {
886 switch (result.type()) {
887 case testing::TestPartResult::kFatalFailure:
888 art::PrintFileToLog("/proc/self/maps", android::base::LogSeverity::ERROR);
889 break;
890
891 // TODO: Could consider logging on EXPECT failures.
892 case testing::TestPartResult::kNonFatalFailure:
Elliott Hughese00648f2018-10-30 08:34:52 -0700893 case testing::TestPartResult::kSkip:
Andreas Gampec857f4a2018-10-25 13:12:37 -0700894 case testing::TestPartResult::kSuccess:
895 break;
896 }
897 }
898};
899
900} // namespace
901
902// Inject our listener into the test runner.
903extern "C"
904__attribute__((visibility("default"))) __attribute__((used))
905void ArtTestGlobalInit() {
906 LOG(ERROR) << "Installing listener";
907 testing::UnitTest::GetInstance()->listeners().Append(new DumpMapsOnFailListener());
908}