Reserve boot image memory in one go.
Load boot image components into the reserved memory.
Test: m test-art-host-gtest
Test: testrunner.py --host
Test: Pixel 2 XL boots.
Test: m test-art-target-gtest
Test: testrunner.py --target --optimizing
Bug: 77856493
Change-Id: I214f947979bc0bbfc6df4312527504e90b88a01d
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index a82da2d..1bf553d 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -227,6 +227,33 @@
return false;
}
+bool MemMap::CheckReservation(uint8_t* expected_ptr,
+ size_t byte_count,
+ const char* name,
+ const MemMap& reservation,
+ /*out*/std::string* error_msg) {
+ if (!reservation.IsValid()) {  // An invalidated/exhausted reservation cannot back a new mapping.
+ *error_msg = StringPrintf("Invalid reservation for %s", name);
+ return false;
+ }
+ DCHECK_ALIGNED(reservation.Begin(), kPageSize);  // Reservations come from mmap(), so page-aligned.
+ if (reservation.Begin() != expected_ptr) {  // New mapping must start exactly at the reservation's current begin.
+ *error_msg = StringPrintf("Bad image reservation start for %s: %p instead of %p",
+ name,
+ reservation.Begin(),
+ expected_ptr);
+ return false;
+ }
+ if (byte_count > reservation.Size()) {  // `byte_count` need not be page-aligned; callers round up later.
+ *error_msg = StringPrintf("Insufficient reservation, required %zu, available %zu",
+ byte_count,
+ reservation.Size());
+ return false;
+ }
+ return true;
+}
+
+
#if USE_ART_LOW_4G_ALLOCATOR
void* MemMap::TryMemMapLow4GB(void* ptr,
size_t page_aligned_byte_count,
@@ -280,7 +307,8 @@
int prot,
bool low_4gb,
bool reuse,
- std::string* error_msg,
+ /*inout*/MemMap* reservation,
+ /*out*/std::string* error_msg,
bool use_debug_name) {
#ifndef __LP64__
UNUSED(low_4gb);
@@ -296,9 +324,16 @@
// reuse means it is okay that it overlaps an existing page mapping.
// Only use this if you actually made the page reservation yourself.
CHECK(addr != nullptr);
+ DCHECK(reservation == nullptr);
DCHECK(ContainedWithinExistingMap(addr, byte_count, error_msg)) << *error_msg;
flags |= MAP_FIXED;
+ } else if (reservation != nullptr) {
+ CHECK(addr != nullptr);
+ if (!CheckReservation(addr, byte_count, name, *reservation, error_msg)) {
+ return MemMap::Invalid();
+ }
+ flags |= MAP_FIXED;
}
unique_fd fd;
@@ -340,6 +375,11 @@
SetDebugName(actual, name, page_aligned_byte_count);
}
+ if (reservation != nullptr) {
+ // Re-mapping was successful, transfer the ownership of the memory to the new MemMap.
+ DCHECK_EQ(actual, reservation->Begin());
+ reservation->ReleaseReservedMemory(byte_count);
+ }
return MemMap(name,
reinterpret_cast<uint8_t*>(actual),
byte_count,
@@ -445,22 +485,30 @@
int fd,
off_t start,
bool low_4gb,
- bool reuse,
const char* filename,
- std::string* error_msg) {
+ bool reuse,
+ /*inout*/MemMap* reservation,
+ /*out*/std::string* error_msg) {
CHECK_NE(0, prot);
CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
- // Note that we do not allow MAP_FIXED unless reuse == true, i.e we
- // expect his mapping to be contained within an existing map.
+ // Note that we do not allow MAP_FIXED unless reuse == true or we have an existing
+ // reservation, i.e we expect this mapping to be contained within an existing map.
if (reuse) {
// reuse means it is okay that it overlaps an existing page mapping.
// Only use this if you actually made the page reservation yourself.
CHECK(expected_ptr != nullptr);
+ DCHECK(reservation == nullptr);
DCHECK(error_msg != nullptr);
DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg))
<< ((error_msg != nullptr) ? *error_msg : std::string());
flags |= MAP_FIXED;
+ } else if (reservation != nullptr) {
+ DCHECK(error_msg != nullptr);
+ if (!CheckReservation(expected_ptr, byte_count, filename, *reservation, error_msg)) {
+ return Invalid();
+ }
+ flags |= MAP_FIXED;
} else {
CHECK_EQ(0, flags & MAP_FIXED);
// Don't bother checking for an overlapping region here. We'll
@@ -522,6 +570,11 @@
page_aligned_byte_count -= redzone_size;
}
+ if (reservation != nullptr) {
+ // Re-mapping was successful, transfer the ownership of the memory to the new MemMap.
+ DCHECK_EQ(actual, reservation->Begin());
+ reservation->ReleaseReservedMemory(byte_count);
+ }
return MemMap(filename,
actual + page_offset,
byte_count,
@@ -699,6 +752,45 @@
return MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
}
+MemMap MemMap::TakeReservedMemory(size_t byte_count) {
+ uint8_t* begin = Begin();  // Capture the start before ReleaseReservedMemory() advances/invalidates it.
+ ReleaseReservedMemory(byte_count); // Performs necessary DCHECK()s on this reservation.
+ size_t base_size = RoundUp(byte_count, kPageSize);  // The new map owns whole pages even for a partial last page.
+ return MemMap(name_, begin, byte_count, begin, base_size, prot_, /* reuse */ false);
+}
+
+void MemMap::ReleaseReservedMemory(size_t byte_count) {
+ // Check the reservation mapping.
+ DCHECK(IsValid());
+ DCHECK(!reuse_);
+ DCHECK(!already_unmapped_);
+ DCHECK_EQ(redzone_size_, 0u);
+ DCHECK_EQ(begin_, base_begin_);  // A reservation owns entire pages; data and base must coincide.
+ DCHECK_EQ(size_, base_size_);
+ DCHECK_ALIGNED(begin_, kPageSize);
+ DCHECK_ALIGNED(size_, kPageSize);
+
+ // Check and round up the `byte_count`.
+ DCHECK_NE(byte_count, 0u);
+ DCHECK_LE(byte_count, size_);
+ byte_count = RoundUp(byte_count, kPageSize);  // Ownership is transferred in whole pages only.
+
+ if (byte_count == size_) {
+ Invalidate();  // Entire reservation consumed; removes this map from `gMaps` as well.
+ } else {
+ // Shrink the reservation MemMap and update its `gMaps` entry.
+ std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+ auto it = GetGMapsEntry(*this);
+ // TODO: When C++17 becomes available, use std::map<>::extract(), modify, insert.
+ gMaps->erase(it);
+ begin_ += byte_count;
+ size_ -= byte_count;
+ base_begin_ = begin_;
+ base_size_ = size_;
+ gMaps->emplace(base_begin_, this);  // Re-register under the new start address.
+ }
+}
+
void MemMap::MadviseDontNeedAndZero() {
if (base_begin_ != nullptr || base_size_ != 0) {
if (!kMadviseZeroes) {
diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h
index f5fead0..20eda32 100644
--- a/libartbase/base/mem_map.h
+++ b/libartbase/base/mem_map.h
@@ -119,7 +119,10 @@
// Request an anonymous region of length 'byte_count' and a requested base address.
// Use null as the requested base address if you don't care.
- // "reuse" allows re-mapping an address range from an existing mapping.
+ //
+ // `reuse` allows re-mapping an address range from an existing mapping which retains the
+ // ownership of the memory. Alternatively, `reservation` allows re-mapping the start of an
+ // existing reservation mapping, transferring the ownership of the memory to the new MemMap.
//
// The word "anonymous" in this context means "not backed by a file". The supplied
// 'name' will be used -- on systems that support it -- to give the mapping
@@ -132,15 +135,23 @@
int prot,
bool low_4gb,
bool reuse,
- std::string* error_msg,
+ /*inout*/MemMap* reservation,
+ /*out*/std::string* error_msg,
bool use_debug_name = true);
static MemMap MapAnonymous(const char* name,
uint8_t* addr,
size_t byte_count,
int prot,
bool low_4gb,
- std::string* error_msg) {
- return MapAnonymous(name, addr, byte_count, prot, low_4gb, /* reuse */ false, error_msg);
+ /*out*/std::string* error_msg) {
+ return MapAnonymous(name,
+ addr,
+ byte_count,
+ prot,
+ low_4gb,
+ /* reuse */ false,
+ /* reservation */ nullptr,
+ error_msg);
}
// Create placeholder for a region allocated by direct call to mmap.
@@ -167,18 +178,23 @@
flags,
fd,
start,
- /*low_4gb*/low_4gb,
- /*reuse*/false,
+ /* low_4gb */ low_4gb,
filename,
+ /* reuse */ false,
+ /* reservation */ nullptr,
error_msg);
}
// Map part of a file, taking care of non-page aligned offsets. The "start" offset is absolute,
// not relative. This version allows requesting a specific address for the base of the mapping.
- // "reuse" allows us to create a view into an existing mapping where we do not take ownership of
- // the memory. If error_msg is null then we do not print /proc/maps to the log if
- // MapFileAtAddress fails. This helps improve performance of the fail case since reading and
- // printing /proc/maps takes several milliseconds in the worst case.
+ //
+ // `reuse` allows re-mapping an address range from an existing mapping which retains the
+ // ownership of the memory. Alternatively, `reservation` allows re-mapping the start of an
+ // existing reservation mapping, transferring the ownership of the memory to the new MemMap.
+ //
+ // If error_msg is null then we do not print /proc/maps to the log if MapFileAtAddress fails.
+ // This helps improve performance of the fail case since reading and printing /proc/maps takes
+ // several milliseconds in the worst case.
//
// On success, returns returns a valid MemMap. On failure, returns an invalid MemMap.
static MemMap MapFileAtAddress(uint8_t* addr,
@@ -188,9 +204,10 @@
int fd,
off_t start,
bool low_4gb,
- bool reuse,
const char* filename,
- std::string* error_msg);
+ bool reuse,
+ /*inout*/MemMap* reservation,
+ /*out*/std::string* error_msg);
const std::string& GetName() const {
return name_;
@@ -244,6 +261,14 @@
std::string* error_msg,
bool use_debug_name = true);
+ // Take ownership of pages at the beginning of the mapping. The mapping must be an
+ // anonymous reservation mapping, owning entire pages. The `byte_count` must not
+ // exceed the size of this reservation.
+ //
+ // Returns a mapping owning `byte_count` bytes rounded up to entire pages
+ // with size set to the passed `byte_count`.
+ MemMap TakeReservedMemory(size_t byte_count);
+
static bool CheckNoGaps(MemMap& begin_map, MemMap& end_map)
REQUIRES(!MemMap::mem_maps_lock_);
static void DumpMaps(std::ostream& os, bool terse = false)
@@ -307,12 +332,21 @@
off_t offset)
REQUIRES(!MemMap::mem_maps_lock_);
+ // Release memory owned by a reservation mapping.
+ void ReleaseReservedMemory(size_t byte_count);
+
// member function to access real_munmap
static bool CheckMapRequest(uint8_t* expected_ptr,
void* actual_ptr,
size_t byte_count,
std::string* error_msg);
+ static bool CheckReservation(uint8_t* expected_ptr,
+ size_t byte_count,
+ const char* name,
+ const MemMap& reservation,
+ /*out*/std::string* error_msg);
+
std::string name_;
uint8_t* begin_ = nullptr; // Start of data. May be changed by AlignBy.
size_t size_ = 0u; // Length of data.
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index 396f12b..ab3d18f 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -540,6 +540,7 @@
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
/* reuse */ false,
+ /* reservation */ nullptr,
&error_msg);
ASSERT_TRUE(map.IsValid());
ASSERT_TRUE(error_msg.empty());
@@ -549,6 +550,7 @@
PROT_READ | PROT_WRITE,
/* low_4gb */ false,
/* reuse */ true,
+ /* reservation */ nullptr,
&error_msg);
ASSERT_TRUE(map2.IsValid());
ASSERT_TRUE(error_msg.empty());
@@ -720,4 +722,108 @@
}
}
+TEST_F(MemMapTest, Reservation) {
+ CommonInit();
+ std::string error_msg;
+ ScratchFile scratch_file;
+ constexpr size_t kMapSize = 5 * kPageSize;
+ std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
+ ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
+
+ MemMap reservation = MemMap::MapAnonymous("Test reservation",
+ /* addr */ nullptr,
+ kMapSize,
+ PROT_NONE,
+ /* low_4gb */ false,
+ &error_msg);
+ ASSERT_TRUE(reservation.IsValid());
+ ASSERT_TRUE(error_msg.empty());
+
+ // Map first part of the reservation.
+ constexpr size_t kChunk1Size = kPageSize - 1u;
+ static_assert(kChunk1Size < kMapSize, "We want to split the reservation.");
+ uint8_t* addr1 = reservation.Begin();
+ MemMap map1 = MemMap::MapFileAtAddress(addr1,
+ /* byte_count */ kChunk1Size,
+ PROT_READ,
+ MAP_PRIVATE,
+ scratch_file.GetFd(),
+ /* start */ 0,
+ /* low_4gb */ false,
+ scratch_file.GetFilename().c_str(),
+ /* reuse */ false,
+ &reservation,
+ &error_msg);
+ ASSERT_TRUE(map1.IsValid()) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
+ ASSERT_EQ(map1.Size(), kChunk1Size);
+ ASSERT_EQ(addr1, map1.Begin());
+ ASSERT_TRUE(reservation.IsValid());
+ // Entire pages are taken from the `reservation`.
+ ASSERT_LT(map1.End(), map1.BaseEnd());
+ ASSERT_EQ(map1.BaseEnd(), reservation.Begin());
+
+ // Map second part as an anonymous mapping.
+ constexpr size_t kChunk2Size = 2 * kPageSize;
+ DCHECK_LT(kChunk2Size, reservation.Size()); // We want to split the reservation.
+ uint8_t* addr2 = reservation.Begin();
+ MemMap map2 = MemMap::MapAnonymous("MiddleReservation",
+ addr2,
+ /* byte_count */ kChunk2Size,
+ PROT_READ,
+ /* low_4gb */ false,
+ /* reuse */ false,
+ &reservation,
+ &error_msg);
+ ASSERT_TRUE(map2.IsValid()) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
+ ASSERT_EQ(map2.Size(), kChunk2Size);
+ ASSERT_EQ(addr2, map2.Begin());
+ ASSERT_EQ(map2.End(), map2.BaseEnd()); // kChunk2Size is page aligned.
+ ASSERT_EQ(map2.BaseEnd(), reservation.Begin());
+
+ // Map the rest of the reservation except the last byte.
+ const size_t kChunk3Size = reservation.Size() - 1u;
+ uint8_t* addr3 = reservation.Begin();
+ MemMap map3 = MemMap::MapFileAtAddress(addr3,
+ /* byte_count */ kChunk3Size,
+ PROT_READ,
+ MAP_PRIVATE,
+ scratch_file.GetFd(),
+ /* start */ dchecked_integral_cast<size_t>(addr3 - addr1),
+ /* low_4gb */ false,
+ scratch_file.GetFilename().c_str(),
+ /* reuse */ false,
+ &reservation,
+ &error_msg);
+ ASSERT_TRUE(map3.IsValid()) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
+ ASSERT_EQ(map3.Size(), kChunk3Size);
+ ASSERT_EQ(addr3, map3.Begin());
+ // Entire pages are taken from the `reservation`, so it's now exhausted.
+ ASSERT_FALSE(reservation.IsValid());
+
+ // Now split the MiddleReservation.
+ constexpr size_t kChunk2ASize = kPageSize - 1u;
+ DCHECK_LT(kChunk2ASize, map2.Size()); // We want to split the reservation.
+ MemMap map2a = map2.TakeReservedMemory(kChunk2ASize);
+ ASSERT_TRUE(map2a.IsValid()) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
+ ASSERT_EQ(map2a.Size(), kChunk2ASize);
+ ASSERT_EQ(addr2, map2a.Begin());
+ ASSERT_TRUE(map2.IsValid());
+ ASSERT_LT(map2a.End(), map2a.BaseEnd());
+ ASSERT_EQ(map2a.BaseEnd(), map2.Begin());
+
+ // And take the rest of the middle reservation.
+ const size_t kChunk2BSize = map2.Size() - 1u;
+ uint8_t* addr2b = map2.Begin();
+ MemMap map2b = map2.TakeReservedMemory(kChunk2BSize);
+ ASSERT_TRUE(map2b.IsValid()) << error_msg;
+ ASSERT_TRUE(error_msg.empty());
+ ASSERT_EQ(map2b.Size(), kChunk2BSize);  // Was kChunk2ASize; passed only because both equal kPageSize - 1u.
+ ASSERT_EQ(addr2b, map2b.Begin());
+ ASSERT_FALSE(map2.IsValid());
+}
+
} // namespace art
diff --git a/libartbase/base/zip_archive.cc b/libartbase/base/zip_archive.cc
index a841bae..174d227 100644
--- a/libartbase/base/zip_archive.cc
+++ b/libartbase/base/zip_archive.cc
@@ -133,16 +133,14 @@
}
MemMap map =
- MemMap::MapFileAtAddress(nullptr, // Expected pointer address
- GetUncompressedLength(), // Byte count
- PROT_READ | PROT_WRITE,
- MAP_PRIVATE,
- zip_fd,
- offset,
- false, // Don't restrict allocation to lower4GB
- false, // Doesn't overlap existing map (reuse=false)
- name.c_str(),
- /*out*/error_msg);
+ MemMap::MapFile(GetUncompressedLength(), // Byte count
+ PROT_READ | PROT_WRITE,
+ MAP_PRIVATE,
+ zip_fd,
+ offset,
+ /* low_4gb */ false,
+ name.c_str(),
+ error_msg);
if (!map.IsValid()) {
DCHECK(!error_msg->empty());