Remove unnecessary indirection from MemMap.

Avoid plain MemMap pointers being passed around by making
MemMap movable and returning MemMap objects by value.
Previously we could have a valid zero-size MemMap but this
is now forbidden.

MemMap::RemapAtEnd() is changed to avoid the explicit call
to munmap(); mmap() with MAP_FIXED automatically removes
old mappings for overlapping regions.

Test: m test-art-host-gtest
Test: testrunner.py --host --optimizing
Test: Pixel 2 XL boots.
Test: m test-art-target-gtest
Test: testrunner.py --target --optimizing
Change-Id: I12bd453c26a396edc20eb141bfd4dad20923f170
diff --git a/libartbase/base/mem_map.cc b/libartbase/base/mem_map.cc
index 5cea869..c417d01 100644
--- a/libartbase/base/mem_map.cc
+++ b/libartbase/base/mem_map.cc
@@ -61,6 +61,21 @@
 // All the non-empty MemMaps. Use a multimap as we do a reserve-and-divide (eg ElfMap::Load()).
 static Maps* gMaps GUARDED_BY(MemMap::GetMemMapsLock()) = nullptr;
 
+// Retrieve iterator to a `gMaps` entry that is known to exist.
+Maps::iterator GetGMapsEntry(const MemMap& map) REQUIRES(MemMap::GetMemMapsLock()) {
+  DCHECK(map.IsValid());
+  DCHECK(gMaps != nullptr);
+  for (auto it = gMaps->lower_bound(map.BaseBegin()), end = gMaps->end();
+       it != end && it->first == map.BaseBegin();
+       ++it) {
+    if (it->second == &map) {
+      return it;
+    }
+  }
+  LOG(FATAL) << "MemMap not found";
+  UNREACHABLE();
+}
+
 std::ostream& operator<<(std::ostream& os, const Maps& mem_maps) {
   os << "MemMap:" << std::endl;
   for (auto it = mem_maps.begin(); it != mem_maps.end(); ++it) {
@@ -231,20 +246,21 @@
 }
 #endif
 
-MemMap* MemMap::MapAnonymous(const char* name,
-                             uint8_t* expected_ptr,
-                             size_t byte_count,
-                             int prot,
-                             bool low_4gb,
-                             bool reuse,
-                             std::string* error_msg,
-                             bool use_ashmem) {
+MemMap MemMap::MapAnonymous(const char* name,
+                            uint8_t* addr,
+                            size_t byte_count,
+                            int prot,
+                            bool low_4gb,
+                            bool reuse,
+                            std::string* error_msg,
+                            bool use_ashmem) {
 #ifndef __LP64__
   UNUSED(low_4gb);
 #endif
   use_ashmem = use_ashmem && !kIsTargetLinux && !kIsTargetFuchsia;
   if (byte_count == 0) {
-    return new MemMap(name, nullptr, 0, nullptr, 0, prot, false);
+    *error_msg = "Empty MemMap requested.";
+    return Invalid();
   }
   size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
 
@@ -252,9 +268,9 @@
   if (reuse) {
     // reuse means it is okay that it overlaps an existing page mapping.
     // Only use this if you actually made the page reservation yourself.
-    CHECK(expected_ptr != nullptr);
+    CHECK(addr != nullptr);
 
-    DCHECK(ContainedWithinExistingMap(expected_ptr, byte_count, error_msg)) << *error_msg;
+    DCHECK(ContainedWithinExistingMap(addr, byte_count, error_msg)) << *error_msg;
     flags |= MAP_FIXED;
   }
 
@@ -296,7 +312,7 @@
   // We need to store and potentially set an error number for pretty printing of errors
   int saved_errno = 0;
 
-  void* actual = MapInternal(expected_ptr,
+  void* actual = MapInternal(addr,
                              page_aligned_byte_count,
                              prot,
                              flags,
@@ -313,28 +329,33 @@
 
       *error_msg = StringPrintf("Failed anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0): %s. "
                                     "See process maps in the log.",
-                                expected_ptr,
+                                addr,
                                 page_aligned_byte_count,
                                 prot,
                                 flags,
                                 fd.get(),
                                 strerror(saved_errno));
     }
-    return nullptr;
+    return Invalid();
   }
-  if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
-    return nullptr;
+  if (!CheckMapRequest(addr, actual, page_aligned_byte_count, error_msg)) {
+    return Invalid();
   }
-  return new MemMap(name, reinterpret_cast<uint8_t*>(actual), byte_count, actual,
-                    page_aligned_byte_count, prot, reuse);
+  return MemMap(name,
+                reinterpret_cast<uint8_t*>(actual),
+                byte_count,
+                actual,
+                page_aligned_byte_count,
+                prot,
+                reuse);
 }
 
-MemMap* MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
+MemMap MemMap::MapDummy(const char* name, uint8_t* addr, size_t byte_count) {
   if (byte_count == 0) {
-    return new MemMap(name, nullptr, 0, nullptr, 0, 0, false);
+    return Invalid();
   }
   const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);
-  return new MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
+  return MemMap(name, addr, byte_count, addr, page_aligned_byte_count, 0, true /* reuse */);
 }
 
 template<typename A, typename B>
@@ -342,19 +363,18 @@
   return static_cast<ptrdiff_t>(reinterpret_cast<intptr_t>(a) - reinterpret_cast<intptr_t>(b));
 }
 
-bool MemMap::ReplaceWith(MemMap** source_ptr, /*out*/std::string* error) {
+bool MemMap::ReplaceWith(MemMap* source, /*out*/std::string* error) {
 #if !HAVE_MREMAP_SYSCALL
-  UNUSED(source_ptr);
+  UNUSED(source);
   *error = "Cannot perform atomic replace because we are missing the required mremap syscall";
   return false;
 #else  // !HAVE_MREMAP_SYSCALL
-  CHECK(source_ptr != nullptr);
-  CHECK(*source_ptr != nullptr);
+  CHECK(source != nullptr);
+  CHECK(source->IsValid());
   if (!MemMap::kCanReplaceMapping) {
     *error = "Unable to perform atomic replace due to runtime environment!";
     return false;
   }
-  MemMap* source = *source_ptr;
   // neither can be reuse.
   if (source->reuse_ || reuse_) {
     *error = "One or both mappings is not a real mmap!";
@@ -406,12 +426,9 @@
   // them later.
   size_t new_base_size = std::max(source->base_size_, base_size_);
 
-  // Delete the old source, don't unmap it though (set reuse) since it is already gone.
-  *source_ptr = nullptr;
+  // Invalidate *source, don't unmap it though since it is already gone.
   size_t source_size = source->size_;
-  source->already_unmapped_ = true;
-  delete source;
-  source = nullptr;
+  source->Invalidate();
 
   size_ = source_size;
   base_size_ = new_base_size;
@@ -422,16 +439,16 @@
 #endif  // !HAVE_MREMAP_SYSCALL
 }
 
-MemMap* MemMap::MapFileAtAddress(uint8_t* expected_ptr,
-                                 size_t byte_count,
-                                 int prot,
-                                 int flags,
-                                 int fd,
-                                 off_t start,
-                                 bool low_4gb,
-                                 bool reuse,
-                                 const char* filename,
-                                 std::string* error_msg) {
+MemMap MemMap::MapFileAtAddress(uint8_t* expected_ptr,
+                                size_t byte_count,
+                                int prot,
+                                int flags,
+                                int fd,
+                                off_t start,
+                                bool low_4gb,
+                                bool reuse,
+                                const char* filename,
+                                std::string* error_msg) {
   CHECK_NE(0, prot);
   CHECK_NE(0, flags & (MAP_SHARED | MAP_PRIVATE));
 
@@ -452,7 +469,7 @@
   }
 
   if (byte_count == 0) {
-    return new MemMap(filename, nullptr, 0, nullptr, 0, prot, false);
+    return Invalid();
   }
   // Adjust 'offset' to be page-aligned as required by mmap.
   int page_offset = start % kPageSize;
@@ -491,10 +508,10 @@
                                 static_cast<int64_t>(page_aligned_offset), filename,
                                 strerror(saved_errno));
     }
-    return nullptr;
+    return Invalid();
   }
   if (!CheckMapRequest(expected_ptr, actual, page_aligned_byte_count, error_msg)) {
-    return nullptr;
+    return Invalid();
   }
   if (redzone_size != 0) {
     const uint8_t *real_start = actual + page_offset;
@@ -506,14 +523,27 @@
     page_aligned_byte_count -= redzone_size;
   }
 
-  return new MemMap(filename, actual + page_offset, byte_count, actual, page_aligned_byte_count,
-                    prot, reuse, redzone_size);
+  return MemMap(filename,
+                actual + page_offset,
+                byte_count,
+                actual,
+                page_aligned_byte_count,
+                prot,
+                reuse,
+                redzone_size);
+}
+
+MemMap::MemMap(MemMap&& other)
+    : MemMap() {
+  swap(other);
 }
 
 MemMap::~MemMap() {
-  if (base_begin_ == nullptr && base_size_ == 0) {
-    return;
-  }
+  Reset();
+}
+
+void MemMap::DoReset() {
+  DCHECK(IsValid());
 
   // Unlike Valgrind, AddressSanitizer requires that all manually poisoned memory is unpoisoned
   // before it is returned to the system.
@@ -533,19 +563,56 @@
     }
   }
 
+  Invalidate();
+}
+
+void MemMap::Invalidate() {
+  DCHECK(IsValid());
+
   // Remove it from gMaps.
   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
-  bool found = false;
-  DCHECK(gMaps != nullptr);
-  for (auto it = gMaps->lower_bound(base_begin_), end = gMaps->end();
-       it != end && it->first == base_begin_; ++it) {
-    if (it->second == this) {
-      found = true;
-      gMaps->erase(it);
-      break;
+  auto it = GetGMapsEntry(*this);
+  gMaps->erase(it);
+
+  // Mark it as invalid.
+  base_size_ = 0u;
+  DCHECK(!IsValid());
+}
+
+void MemMap::swap(MemMap& other) {
+  if (IsValid() || other.IsValid()) {
+    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+    DCHECK(gMaps != nullptr);
+    auto this_it = IsValid() ? GetGMapsEntry(*this) : gMaps->end();
+    auto other_it = other.IsValid() ? GetGMapsEntry(other) : gMaps->end();
+    if (IsValid()) {
+      DCHECK(this_it != gMaps->end());
+      DCHECK_EQ(this_it->second, this);
+      this_it->second = &other;
     }
+    if (other.IsValid()) {
+      DCHECK(other_it != gMaps->end());
+      DCHECK_EQ(other_it->second, &other);
+      other_it->second = this;
+    }
+    // Swap members with the `mem_maps_lock_` held so that `base_begin_` matches
+    // with the `gMaps` key when other threads try to use `gMaps`.
+    SwapMembers(other);
+  } else {
+    SwapMembers(other);
   }
-  CHECK(found) << "MemMap not found";
+}
+
+void MemMap::SwapMembers(MemMap& other) {
+  name_.swap(other.name_);
+  std::swap(begin_, other.begin_);
+  std::swap(size_, other.size_);
+  std::swap(base_begin_, other.base_begin_);
+  std::swap(base_size_, other.base_size_);
+  std::swap(prot_, other.prot_);
+  std::swap(reuse_, other.reuse_);
+  std::swap(already_unmapped_, other.already_unmapped_);
+  std::swap(redzone_size_, other.redzone_size_);
 }
 
 MemMap::MemMap(const std::string& name, uint8_t* begin, size_t size, void* base_begin,
@@ -568,8 +635,11 @@
   }
 }
 
-MemMap* MemMap::RemapAtEnd(uint8_t* new_end, const char* tail_name, int tail_prot,
-                           std::string* error_msg, bool use_ashmem) {
+MemMap MemMap::RemapAtEnd(uint8_t* new_end,
+                          const char* tail_name,
+                          int tail_prot,
+                          std::string* error_msg,
+                          bool use_ashmem) {
   use_ashmem = use_ashmem && !kIsTargetLinux && !kIsTargetFuchsia;
   DCHECK_GE(new_end, Begin());
   DCHECK_LE(new_end, End());
@@ -583,11 +653,11 @@
   uint8_t* new_base_end = new_end;
   DCHECK_LE(new_base_end, old_base_end);
   if (new_base_end == old_base_end) {
-    return new MemMap(tail_name, nullptr, 0, nullptr, 0, tail_prot, false);
+    return Invalid();
   }
-  size_ = new_end - reinterpret_cast<uint8_t*>(begin_);
-  base_size_ = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
-  DCHECK_LE(begin_ + size_, reinterpret_cast<uint8_t*>(base_begin_) + base_size_);
+  size_t new_size = new_end - reinterpret_cast<uint8_t*>(begin_);
+  size_t new_base_size = new_base_end - reinterpret_cast<uint8_t*>(base_begin_);
+  DCHECK_LE(begin_ + new_size, reinterpret_cast<uint8_t*>(base_begin_) + new_base_size);
   size_t tail_size = old_end - new_end;
   uint8_t* tail_base_begin = new_base_end;
   size_t tail_base_size = old_base_end - new_base_end;
@@ -595,7 +665,7 @@
   DCHECK_ALIGNED(tail_base_size, kPageSize);
 
   unique_fd fd;
-  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+  int flags = MAP_PRIVATE | MAP_FIXED | MAP_ANONYMOUS;
   if (use_ashmem) {
     // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
     // prefixed "dalvik-".
@@ -606,23 +676,14 @@
     if (fd.get() == -1) {
       *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
                                 tail_name, strerror(errno));
-      return nullptr;
+      return Invalid();
     }
   }
 
   MEMORY_TOOL_MAKE_UNDEFINED(tail_base_begin, tail_base_size);
-  // Unmap/map the tail region.
-  int result = TargetMUnmap(tail_base_begin, tail_base_size);
-  if (result == -1) {
-    PrintFileToLog("/proc/self/maps", LogSeverity::WARNING);
-    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'. See process maps in the log.",
-                              tail_base_begin, tail_base_size, name_.c_str());
-    return nullptr;
-  }
-  // Don't cause memory allocation between the munmap and the mmap
-  // calls. Otherwise, libc (or something else) might take this memory
-  // region. Note this isn't perfect as there's no way to prevent
-  // other threads to try to take this memory region here.
+  // Note: Do not explicitly unmap the tail region, mmap() with MAP_FIXED automatically
+  // removes old mappings for the overlapping region. This makes the operation atomic
+  // and prevents other threads from racing to allocate memory in the requested region.
   uint8_t* actual = reinterpret_cast<uint8_t*>(TargetMMap(tail_base_begin,
                                                           tail_base_size,
                                                           tail_prot,
@@ -634,9 +695,18 @@
     *error_msg = StringPrintf("anonymous mmap(%p, %zd, 0x%x, 0x%x, %d, 0) failed. See process "
                               "maps in the log.", tail_base_begin, tail_base_size, tail_prot, flags,
                               fd.get());
-    return nullptr;
+    return Invalid();
   }
-  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
+  // Update *this.
+  if (new_base_size == 0u) {
+    std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+    auto it = GetGMapsEntry(*this);
+    gMaps->erase(it);
+  }
+  size_ = new_size;
+  base_size_ = new_base_size;
+  // Return the new mapping.
+  return MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot, false);
 }
 
 void MemMap::MadviseDontNeedAndZero() {
@@ -675,15 +745,15 @@
   return false;
 }
 
-bool MemMap::CheckNoGaps(MemMap* begin_map, MemMap* end_map) {
+bool MemMap::CheckNoGaps(MemMap& begin_map, MemMap& end_map) {
   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
-  CHECK(begin_map != nullptr);
-  CHECK(end_map != nullptr);
+  CHECK(begin_map.IsValid());
+  CHECK(end_map.IsValid());
   CHECK(HasMemMap(begin_map));
   CHECK(HasMemMap(end_map));
-  CHECK_LE(begin_map->BaseBegin(), end_map->BaseBegin());
-  MemMap* map = begin_map;
-  while (map->BaseBegin() != end_map->BaseBegin()) {
+  CHECK_LE(begin_map.BaseBegin(), end_map.BaseBegin());
+  MemMap* map = &begin_map;
+  while (map->BaseBegin() != end_map.BaseBegin()) {
     MemMap* next_map = GetLargestMemMapAt(map->BaseEnd());
     if (next_map == nullptr) {
       // Found a gap.
@@ -758,11 +828,11 @@
   }
 }
 
-bool MemMap::HasMemMap(MemMap* map) {
-  void* base_begin = map->BaseBegin();
+bool MemMap::HasMemMap(MemMap& map) {
+  void* base_begin = map.BaseBegin();
   for (auto it = gMaps->lower_bound(base_begin), end = gMaps->end();
        it != end && it->first == base_begin; ++it) {
-    if (it->second == map) {
+    if (it->second == &map) {
       return true;
     }
   }
@@ -1049,6 +1119,7 @@
   CHECK_EQ(size_, base_size_) << "Unsupported";
   CHECK_GT(size, static_cast<size_t>(kPageSize));
   CHECK_ALIGNED(size, kPageSize);
+  CHECK(!reuse_);
   if (IsAlignedParam(reinterpret_cast<uintptr_t>(base_begin_), size) &&
       IsAlignedParam(base_size_, size)) {
     // Already aligned.
@@ -1079,17 +1150,17 @@
         << " aligned_base_end=" << reinterpret_cast<void*>(aligned_base_end);
   }
   std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+  if (base_begin < aligned_base_begin) {
+    auto it = GetGMapsEntry(*this);
+    // TODO: When C++17 becomes available, use std::map<>::extract(), modify, insert.
+    gMaps->erase(it);
+    gMaps->insert(std::make_pair(aligned_base_begin, this));
+  }
   base_begin_ = aligned_base_begin;
   base_size_ = aligned_base_size;
   begin_ = aligned_base_begin;
   size_ = aligned_base_size;
   DCHECK(gMaps != nullptr);
-  if (base_begin < aligned_base_begin) {
-    auto it = gMaps->find(base_begin);
-    CHECK(it != gMaps->end()) << "MemMap not found";
-    gMaps->erase(it);
-    gMaps->insert(std::make_pair(base_begin_, this));
-  }
 }
 
 }  // namespace art
diff --git a/libartbase/base/mem_map.h b/libartbase/base/mem_map.h
index 1979357..525fade 100644
--- a/libartbase/base/mem_map.h
+++ b/libartbase/base/mem_map.h
@@ -60,6 +60,37 @@
  public:
   static constexpr bool kCanReplaceMapping = HAVE_MREMAP_SYSCALL;
 
+  // Creates an invalid mapping.
+  MemMap() {}
+
+  // Creates an invalid mapping. Used when we want to be more explicit than MemMap().
+  static MemMap Invalid() {
+    return MemMap();
+  }
+
+  MemMap(MemMap&& other) REQUIRES(!MemMap::mem_maps_lock_);
+  MemMap& operator=(MemMap&& other) REQUIRES(!MemMap::mem_maps_lock_) {
+    Reset();
+    swap(other);
+    return *this;
+  }
+
+  // Releases the memory mapping.
+  ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);
+
+  // Swap two MemMaps.
+  void swap(MemMap& other);
+
+  void Reset() {
+    if (IsValid()) {
+      DoReset();
+    }
+  }
+
+  bool IsValid() const {
+    return base_size_ != 0u;
+  }
+
   // Replace the data in this memmmap with the data in the memmap pointed to by source. The caller
   // relinquishes ownership of the source mmap.
   //
@@ -74,15 +105,14 @@
   //   * mremap must succeed when called on the mappings.
   //
   // If this call succeeds it will return true and:
-  //   * Deallocate *source
-  //   * Sets *source to nullptr
+  //   * Invalidate *source
   //   * The protection of this will remain the same.
   //   * The size of this will be the size of the source
   //   * The data in this will be the data from source.
   //
   // If this call fails it will return false and make no changes to *source or this. The ownership
   // of the source mmap is returned to the caller.
-  bool ReplaceWith(/*in-out*/MemMap** source, /*out*/std::string* error);
+  bool ReplaceWith(/*in-out*/MemMap* source, /*out*/std::string* error);
 
   // Request an anonymous region of length 'byte_count' and a requested base address.
   // Use null as the requested base address if you don't care.
@@ -92,34 +122,34 @@
   // 'name' will be used -- on systems that support it -- to give the mapping
   // a name.
   //
-  // On success, returns returns a MemMap instance.  On failure, returns null.
-  static MemMap* MapAnonymous(const char* name,
-                              uint8_t* addr,
-                              size_t byte_count,
-                              int prot,
-                              bool low_4gb,
-                              bool reuse,
-                              std::string* error_msg,
-                              bool use_ashmem = true);
+  // On success, returns a valid MemMap.  On failure, returns an invalid MemMap.
+  static MemMap MapAnonymous(const char* name,
+                             uint8_t* addr,
+                             size_t byte_count,
+                             int prot,
+                             bool low_4gb,
+                             bool reuse,
+                             std::string* error_msg,
+                             bool use_ashmem = true);
 
   // Create placeholder for a region allocated by direct call to mmap.
   // This is useful when we do not have control over the code calling mmap,
   // but when we still want to keep track of it in the list.
   // The region is not considered to be owned and will not be unmmaped.
-  static MemMap* MapDummy(const char* name, uint8_t* addr, size_t byte_count);
+  static MemMap MapDummy(const char* name, uint8_t* addr, size_t byte_count);
 
   // Map part of a file, taking care of non-page aligned offsets.  The
   // "start" offset is absolute, not relative.
   //
-  // On success, returns returns a MemMap instance.  On failure, returns null.
-  static MemMap* MapFile(size_t byte_count,
-                         int prot,
-                         int flags,
-                         int fd,
-                         off_t start,
-                         bool low_4gb,
-                         const char* filename,
-                         std::string* error_msg) {
+  // On success, returns a valid MemMap.  On failure, returns an invalid MemMap.
+  static MemMap MapFile(size_t byte_count,
+                        int prot,
+                        int flags,
+                        int fd,
+                        off_t start,
+                        bool low_4gb,
+                        const char* filename,
+                        std::string* error_msg) {
     return MapFileAtAddress(nullptr,
                             byte_count,
                             prot,
@@ -139,20 +169,17 @@
   // MapFileAtAddress fails. This helps improve performance of the fail case since reading and
   // printing /proc/maps takes several milliseconds in the worst case.
   //
-  // On success, returns returns a MemMap instance.  On failure, returns null.
-  static MemMap* MapFileAtAddress(uint8_t* addr,
-                                  size_t byte_count,
-                                  int prot,
-                                  int flags,
-                                  int fd,
-                                  off_t start,
-                                  bool low_4gb,
-                                  bool reuse,
-                                  const char* filename,
-                                  std::string* error_msg);
-
-  // Releases the memory mapping.
-  ~MemMap() REQUIRES(!MemMap::mem_maps_lock_);
+  // On success, returns a valid MemMap.  On failure, returns an invalid MemMap.
+  static MemMap MapFileAtAddress(uint8_t* addr,
+                                 size_t byte_count,
+                                 int prot,
+                                 int flags,
+                                 int fd,
+                                 off_t start,
+                                 bool low_4gb,
+                                 bool reuse,
+                                 const char* filename,
+                                 std::string* error_msg);
 
   const std::string& GetName() const {
     return name_;
@@ -200,13 +227,13 @@
   }
 
   // Unmap the pages at end and remap them to create another memory map.
-  MemMap* RemapAtEnd(uint8_t* new_end,
-                     const char* tail_name,
-                     int tail_prot,
-                     std::string* error_msg,
-                     bool use_ashmem = true);
+  MemMap RemapAtEnd(uint8_t* new_end,
+                    const char* tail_name,
+                    int tail_prot,
+                    std::string* error_msg,
+                    bool use_ashmem = true);
 
-  static bool CheckNoGaps(MemMap* begin_map, MemMap* end_map)
+  static bool CheckNoGaps(MemMap& begin_map, MemMap& end_map)
       REQUIRES(!MemMap::mem_maps_lock_);
   static void DumpMaps(std::ostream& os, bool terse = false)
       REQUIRES(!MemMap::mem_maps_lock_);
@@ -240,9 +267,13 @@
          bool reuse,
          size_t redzone_size = 0) REQUIRES(!MemMap::mem_maps_lock_);
 
+  void DoReset();
+  void Invalidate();
+  void SwapMembers(MemMap& other);
+
   static void DumpMapsLocked(std::ostream& os, bool terse)
       REQUIRES(MemMap::mem_maps_lock_);
-  static bool HasMemMap(MemMap* map)
+  static bool HasMemMap(MemMap& map)
       REQUIRES(MemMap::mem_maps_lock_);
   static MemMap* GetLargestMemMapAt(void* address)
       REQUIRES(MemMap::mem_maps_lock_);
@@ -271,23 +302,23 @@
                               size_t byte_count,
                               std::string* error_msg);
 
-  const std::string name_;
-  uint8_t* begin_;  // Start of data. May be changed by AlignBy.
-  size_t size_;  // Length of data.
+  std::string name_;
+  uint8_t* begin_ = nullptr;    // Start of data. May be changed by AlignBy.
+  size_t size_ = 0u;            // Length of data.
 
-  void* base_begin_;  // Page-aligned base address. May be changed by AlignBy.
-  size_t base_size_;  // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
-  int prot_;  // Protection of the map.
+  void* base_begin_ = nullptr;  // Page-aligned base address. May be changed by AlignBy.
+  size_t base_size_ = 0u;       // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
+  int prot_ = 0;                // Protection of the map.
 
   // When reuse_ is true, this is just a view of an existing mapping
   // and we do not take ownership and are not responsible for
   // unmapping.
-  const bool reuse_;
+  bool reuse_ = false;
 
   // When already_unmapped_ is true the destructor will not call munmap.
-  bool already_unmapped_;
+  bool already_unmapped_ = false;
 
-  const size_t redzone_size_;
+  size_t redzone_size_ = 0u;
 
 #if USE_ART_LOW_4G_ALLOCATOR
   static uintptr_t next_mem_pos_;   // Next memory location to check for low_4g extent.
@@ -309,6 +340,10 @@
   friend class MemMapTest;  // To allow access to base_begin_ and base_size_.
 };
 
+inline void swap(MemMap& lhs, MemMap& rhs) {
+  lhs.swap(rhs);
+}
+
 std::ostream& operator<<(std::ostream& os, const MemMap& mem_map);
 
 // Zero and release pages if possible, no requirements on alignments.
diff --git a/libartbase/base/mem_map_test.cc b/libartbase/base/mem_map_test.cc
index c575c7a..b2f5c72 100644
--- a/libartbase/base/mem_map_test.cc
+++ b/libartbase/base/mem_map_test.cc
@@ -30,14 +30,6 @@
 
 class MemMapTest : public CommonArtTest {
  public:
-  static uint8_t* BaseBegin(MemMap* mem_map) {
-    return reinterpret_cast<uint8_t*>(mem_map->base_begin_);
-  }
-
-  static size_t BaseSize(MemMap* mem_map) {
-    return mem_map->base_size_;
-  }
-
   static bool IsAddressMapped(void* addr) {
     bool res = msync(addr, 1, MS_SYNC) == 0;
     if (!res && errno != ENOMEM) {
@@ -60,15 +52,15 @@
   static uint8_t* GetValidMapAddress(size_t size, bool low_4gb) {
     // Find a valid map address and unmap it before returning.
     std::string error_msg;
-    std::unique_ptr<MemMap> map(MemMap::MapAnonymous("temp",
-                                                     nullptr,
-                                                     size,
-                                                     PROT_READ,
-                                                     low_4gb,
-                                                     false,
-                                                     &error_msg));
-    CHECK(map != nullptr);
-    return map->Begin();
+    MemMap map = MemMap::MapAnonymous("temp",
+                                      /* addr */ nullptr,
+                                      size,
+                                      PROT_READ,
+                                      low_4gb,
+                                      /* reuse */ false,
+                                      &error_msg);
+    CHECK(map.IsValid());
+    return map.Begin();
   }
 
   static void RemapAtEndTest(bool low_4gb) {
@@ -76,37 +68,38 @@
     // Cast the page size to size_t.
     const size_t page_size = static_cast<size_t>(kPageSize);
     // Map a two-page memory region.
-    MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
-                                      nullptr,
-                                      2 * page_size,
-                                      PROT_READ | PROT_WRITE,
-                                      low_4gb,
-                                      false,
-                                      &error_msg);
+    MemMap m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
+                                     /* addr */ nullptr,
+                                     2 * page_size,
+                                     PROT_READ | PROT_WRITE,
+                                     low_4gb,
+                                     /* reuse */ false,
+                                     &error_msg);
     // Check its state and write to it.
-    uint8_t* base0 = m0->Begin();
+    ASSERT_TRUE(m0.IsValid());
+    uint8_t* base0 = m0.Begin();
     ASSERT_TRUE(base0 != nullptr) << error_msg;
-    size_t size0 = m0->Size();
-    EXPECT_EQ(m0->Size(), 2 * page_size);
-    EXPECT_EQ(BaseBegin(m0), base0);
-    EXPECT_EQ(BaseSize(m0), size0);
+    size_t size0 = m0.Size();
+    EXPECT_EQ(m0.Size(), 2 * page_size);
+    EXPECT_EQ(m0.BaseBegin(), base0);
+    EXPECT_EQ(m0.BaseSize(), size0);
     memset(base0, 42, 2 * page_size);
     // Remap the latter half into a second MemMap.
-    MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
-                                "MemMapTest_RemapAtEndTest_map1",
-                                PROT_READ | PROT_WRITE,
-                                &error_msg);
+    MemMap m1 = m0.RemapAtEnd(base0 + page_size,
+                              "MemMapTest_RemapAtEndTest_map1",
+                              PROT_READ | PROT_WRITE,
+                              &error_msg);
     // Check the states of the two maps.
-    EXPECT_EQ(m0->Begin(), base0) << error_msg;
-    EXPECT_EQ(m0->Size(), page_size);
-    EXPECT_EQ(BaseBegin(m0), base0);
-    EXPECT_EQ(BaseSize(m0), page_size);
-    uint8_t* base1 = m1->Begin();
-    size_t size1 = m1->Size();
+    EXPECT_EQ(m0.Begin(), base0) << error_msg;
+    EXPECT_EQ(m0.Size(), page_size);
+    EXPECT_EQ(m0.BaseBegin(), base0);
+    EXPECT_EQ(m0.BaseSize(), page_size);
+    uint8_t* base1 = m1.Begin();
+    size_t size1 = m1.Size();
     EXPECT_EQ(base1, base0 + page_size);
     EXPECT_EQ(size1, page_size);
-    EXPECT_EQ(BaseBegin(m1), base1);
-    EXPECT_EQ(BaseSize(m1), size1);
+    EXPECT_EQ(m1.BaseBegin(), base1);
+    EXPECT_EQ(m1.BaseSize(), size1);
     // Write to the second region.
     memset(base1, 43, page_size);
     // Check the contents of the two regions.
@@ -117,13 +110,18 @@
       EXPECT_EQ(base1[i], 43);
     }
     // Unmap the first region.
-    delete m0;
+    m0.Reset();
     // Make sure the second region is still accessible after the first
     // region is unmapped.
     for (size_t i = 0; i < page_size; ++i) {
       EXPECT_EQ(base1[i], 43);
     }
-    delete m1;
+    MemMap m2 = m1.RemapAtEnd(m1.Begin(),
+                              "MemMapTest_RemapAtEndTest_map1",
+                              PROT_READ | PROT_WRITE,
+                              &error_msg);
+    ASSERT_TRUE(m2.IsValid()) << error_msg;
+    ASSERT_FALSE(m1.IsValid());
   }
 
   void CommonInit() {
@@ -168,232 +166,241 @@
 #if HAVE_MREMAP_SYSCALL
 TEST_F(MemMapTest, ReplaceMapping_SameSize) {
   std::string error_msg;
-  std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
-                                                    nullptr,
-                                                    kPageSize,
-                                                    PROT_READ,
-                                                    false,
-                                                    false,
-                                                    &error_msg));
-  ASSERT_TRUE(dest != nullptr);
-  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
-                                        nullptr,
-                                        kPageSize,
-                                        PROT_WRITE | PROT_READ,
-                                        false,
-                                        false,
-                                        &error_msg);
-  ASSERT_TRUE(source != nullptr);
-  void* source_addr = source->Begin();
-  void* dest_addr = dest->Begin();
+  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+                                     /* addr */ nullptr,
+                                     kPageSize,
+                                     PROT_READ,
+                                     /* low_4gb */ false,
+                                     /* reuse */ false,
+                                     &error_msg);
+  ASSERT_TRUE(dest.IsValid());
+  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+                                       /* addr */ nullptr,
+                                       kPageSize,
+                                       PROT_WRITE | PROT_READ,
+                                       /* low_4gb */ false,
+                                       /* reuse */ false,
+                                       &error_msg);
+  ASSERT_TRUE(source.IsValid());
+  void* source_addr = source.Begin();
+  void* dest_addr = dest.Begin();
   ASSERT_TRUE(IsAddressMapped(source_addr));
   ASSERT_TRUE(IsAddressMapped(dest_addr));
 
   std::vector<uint8_t> data = RandomData(kPageSize);
-  memcpy(source->Begin(), data.data(), data.size());
+  memcpy(source.Begin(), data.data(), data.size());
 
-  ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
 
   ASSERT_FALSE(IsAddressMapped(source_addr));
   ASSERT_TRUE(IsAddressMapped(dest_addr));
-  ASSERT_TRUE(source == nullptr);
+  ASSERT_FALSE(source.IsValid());
 
-  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
 
-  ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
 }
 
 TEST_F(MemMapTest, ReplaceMapping_MakeLarger) {
   std::string error_msg;
-  std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
-                                                    nullptr,
-                                                    5 * kPageSize,  // Need to make it larger
-                                                                    // initially so we know
-                                                                    // there won't be mappings
-                                                                    // in the way we we move
-                                                                    // source.
-                                                    PROT_READ,
-                                                    false,
-                                                    false,
-                                                    &error_msg));
-  ASSERT_TRUE(dest != nullptr);
-  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
-                                        nullptr,
-                                        3 * kPageSize,
-                                        PROT_WRITE | PROT_READ,
-                                        false,
-                                        false,
-                                        &error_msg);
-  ASSERT_TRUE(source != nullptr);
-  uint8_t* source_addr = source->Begin();
-  uint8_t* dest_addr = dest->Begin();
+  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+                                     /* addr */ nullptr,
+                                     5 * kPageSize,  // Need to make it larger
+                                                     // initially so we know
+                                                     // there won't be mappings
+                                                     // in the way we move
+                                                     // source.
+                                     PROT_READ,
+                                     /* low_4gb */ false,
+                                     /* reuse */ false,
+                                     &error_msg);
+  ASSERT_TRUE(dest.IsValid());
+  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+                                       /* addr */ nullptr,
+                                       3 * kPageSize,
+                                       PROT_WRITE | PROT_READ,
+                                       /* low_4gb */ false,
+                                       /* reuse */ false,
+                                       &error_msg);
+  ASSERT_TRUE(source.IsValid());
+  uint8_t* source_addr = source.Begin();
+  uint8_t* dest_addr = dest.Begin();
   ASSERT_TRUE(IsAddressMapped(source_addr));
 
   // Fill the source with random data.
   std::vector<uint8_t> data = RandomData(3 * kPageSize);
-  memcpy(source->Begin(), data.data(), data.size());
+  memcpy(source.Begin(), data.data(), data.size());
 
   // Make the dest smaller so that we know we'll have space.
-  dest->SetSize(kPageSize);
+  dest.SetSize(kPageSize);
 
   ASSERT_TRUE(IsAddressMapped(dest_addr));
   ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
-  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
 
-  ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
 
   ASSERT_FALSE(IsAddressMapped(source_addr));
-  ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));
+  ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * kPageSize));
   ASSERT_TRUE(IsAddressMapped(dest_addr));
   ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
-  ASSERT_TRUE(source == nullptr);
+  ASSERT_FALSE(source.IsValid());
 
-  ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
 }
 
 TEST_F(MemMapTest, ReplaceMapping_MakeSmaller) {
   std::string error_msg;
-  std::unique_ptr<MemMap> dest(MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
-                                                    nullptr,
-                                                    3 * kPageSize,
-                                                    PROT_READ,
-                                                    false,
-                                                    false,
-                                                    &error_msg));
-  ASSERT_TRUE(dest != nullptr);
-  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
-                                        nullptr,
-                                        kPageSize,
-                                        PROT_WRITE | PROT_READ,
-                                        false,
-                                        false,
-                                        &error_msg);
-  ASSERT_TRUE(source != nullptr);
-  uint8_t* source_addr = source->Begin();
-  uint8_t* dest_addr = dest->Begin();
+  MemMap dest = MemMap::MapAnonymous("MapAnonymousEmpty-atomic-replace-dest",
+                                     /* addr */ nullptr,
+                                     3 * kPageSize,
+                                     PROT_READ,
+                                     /* low_4gb */ false,
+                                     /* reuse */ false,
+                                     &error_msg);
+  ASSERT_TRUE(dest.IsValid());
+  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+                                       /* addr */ nullptr,
+                                       kPageSize,
+                                       PROT_WRITE | PROT_READ,
+                                       /* low_4gb */ false,
+                                       /* reuse */ false,
+                                       &error_msg);
+  ASSERT_TRUE(source.IsValid());
+  uint8_t* source_addr = source.Begin();
+  uint8_t* dest_addr = dest.Begin();
   ASSERT_TRUE(IsAddressMapped(source_addr));
   ASSERT_TRUE(IsAddressMapped(dest_addr));
   ASSERT_TRUE(IsAddressMapped(dest_addr + 2 * kPageSize));
-  ASSERT_EQ(dest->Size(), static_cast<size_t>(3 * kPageSize));
+  ASSERT_EQ(dest.Size(), static_cast<size_t>(3 * kPageSize));
 
   std::vector<uint8_t> data = RandomData(kPageSize);
-  memcpy(source->Begin(), data.data(), kPageSize);
+  memcpy(source.Begin(), data.data(), kPageSize);
 
-  ASSERT_TRUE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+  ASSERT_TRUE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
 
   ASSERT_FALSE(IsAddressMapped(source_addr));
-  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
   ASSERT_TRUE(IsAddressMapped(dest_addr));
   ASSERT_FALSE(IsAddressMapped(dest_addr + 2 * kPageSize));
-  ASSERT_TRUE(source == nullptr);
+  ASSERT_FALSE(source.IsValid());
 
-  ASSERT_EQ(memcmp(dest->Begin(), data.data(), dest->Size()), 0);
+  ASSERT_EQ(memcmp(dest.Begin(), data.data(), dest.Size()), 0);
 }
 
 TEST_F(MemMapTest, ReplaceMapping_FailureOverlap) {
   std::string error_msg;
-  std::unique_ptr<MemMap> dest(
+  MemMap dest =
       MemMap::MapAnonymous(
           "MapAnonymousEmpty-atomic-replace-dest",
-          nullptr,
+          /* addr */ nullptr,
           3 * kPageSize,  // Need to make it larger initially so we know there won't be mappings in
                           // the way we we move source.
           PROT_READ | PROT_WRITE,
-          false,
-          false,
-          &error_msg));
-  ASSERT_TRUE(dest != nullptr);
+          /* low_4gb */ false,
+          /* reuse */ false,
+          &error_msg);
+  ASSERT_TRUE(dest.IsValid());
   // Resize down to 1 page so we can remap the rest.
-  dest->SetSize(kPageSize);
+  dest.SetSize(kPageSize);
   // Create source from the last 2 pages
-  MemMap* source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
-                                        dest->Begin() + kPageSize,
-                                        2 * kPageSize,
-                                        PROT_WRITE | PROT_READ,
-                                        false,
-                                        false,
-                                        &error_msg);
-  ASSERT_TRUE(source != nullptr);
-  MemMap* orig_source = source;
-  ASSERT_EQ(dest->Begin() + kPageSize, source->Begin());
-  uint8_t* source_addr = source->Begin();
-  uint8_t* dest_addr = dest->Begin();
+  MemMap source = MemMap::MapAnonymous("MapAnonymous-atomic-replace-source",
+                                       dest.Begin() + kPageSize,
+                                       2 * kPageSize,
+                                       PROT_WRITE | PROT_READ,
+                                       /* low_4gb */ false,
+                                       /* reuse */ false,
+                                       &error_msg);
+  ASSERT_TRUE(source.IsValid());
+  ASSERT_EQ(dest.Begin() + kPageSize, source.Begin());
+  uint8_t* source_addr = source.Begin();
+  uint8_t* dest_addr = dest.Begin();
   ASSERT_TRUE(IsAddressMapped(source_addr));
 
   // Fill the source and dest with random data.
   std::vector<uint8_t> data = RandomData(2 * kPageSize);
-  memcpy(source->Begin(), data.data(), data.size());
+  memcpy(source.Begin(), data.data(), data.size());
   std::vector<uint8_t> dest_data = RandomData(kPageSize);
-  memcpy(dest->Begin(), dest_data.data(), dest_data.size());
+  memcpy(dest.Begin(), dest_data.data(), dest_data.size());
 
   ASSERT_TRUE(IsAddressMapped(dest_addr));
-  ASSERT_EQ(dest->Size(), static_cast<size_t>(kPageSize));
+  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));
 
-  ASSERT_FALSE(dest->ReplaceWith(&source, &error_msg)) << error_msg;
+  ASSERT_FALSE(dest.ReplaceWith(&source, &error_msg)) << error_msg;
 
-  ASSERT_TRUE(source == orig_source);
   ASSERT_TRUE(IsAddressMapped(source_addr));
   ASSERT_TRUE(IsAddressMapped(dest_addr));
-  ASSERT_EQ(source->Size(), data.size());
-  ASSERT_EQ(dest->Size(), dest_data.size());
+  ASSERT_EQ(source.Size(), data.size());
+  ASSERT_EQ(dest.Size(), dest_data.size());
 
-  ASSERT_EQ(memcmp(source->Begin(), data.data(), data.size()), 0);
-  ASSERT_EQ(memcmp(dest->Begin(), dest_data.data(), dest_data.size()), 0);
-
-  delete source;
+  ASSERT_EQ(memcmp(source.Begin(), data.data(), data.size()), 0);
+  ASSERT_EQ(memcmp(dest.Begin(), dest_data.data(), dest_data.size()), 0);
 }
 #endif  // HAVE_MREMAP_SYSCALL
 
 TEST_F(MemMapTest, MapAnonymousEmpty) {
   CommonInit();
   std::string error_msg;
-  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
-                                                   nullptr,
-                                                   0,
-                                                   PROT_READ,
-                                                   false,
-                                                   false,
-                                                   &error_msg));
-  ASSERT_TRUE(map.get() != nullptr) << error_msg;
-  ASSERT_TRUE(error_msg.empty());
-  map.reset(MemMap::MapAnonymous("MapAnonymousEmpty",
-                                 nullptr,
-                                 kPageSize,
-                                 PROT_READ | PROT_WRITE,
-                                 false,
-                                 false,
-                                 &error_msg));
-  ASSERT_TRUE(map.get() != nullptr) << error_msg;
+  MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
+                                    /* addr */ nullptr,
+                                    0,
+                                    PROT_READ,
+                                    /* low_4gb */ false,
+                                    /* reuse */ false,
+                                    &error_msg);
+  ASSERT_FALSE(map.IsValid()) << error_msg;
+  ASSERT_FALSE(error_msg.empty());
+
+  error_msg.clear();
+  map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
+                             /* addr */ nullptr,
+                             kPageSize,
+                             PROT_READ | PROT_WRITE,
+                             /* low_4gb */ false,
+                             /* reuse */ false,
+                             &error_msg);
+  ASSERT_TRUE(map.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
 }
 
 TEST_F(MemMapTest, MapAnonymousFailNullError) {
   CommonInit();
   // Test that we don't crash with a null error_str when mapping at an invalid location.
-  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousInvalid",
-                                                    reinterpret_cast<uint8_t*>(kPageSize),
-                                                    0x20000,
-                                                    PROT_READ | PROT_WRITE,
-                                                    false,
-                                                    false,
-                                                    nullptr));
-  ASSERT_EQ(nullptr, map.get());
+  MemMap map = MemMap::MapAnonymous("MapAnonymousInvalid",
+                                    reinterpret_cast<uint8_t*>(kPageSize),
+                                    0x20000,
+                                    PROT_READ | PROT_WRITE,
+                                    /* low_4gb */ false,
+                                    /* reuse */ false,
+                                    nullptr);
+  ASSERT_FALSE(map.IsValid());
 }
 
 #ifdef __LP64__
 TEST_F(MemMapTest, MapAnonymousEmpty32bit) {
   CommonInit();
   std::string error_msg;
-  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousEmpty",
-                                                   nullptr,
-                                                   kPageSize,
-                                                   PROT_READ | PROT_WRITE,
-                                                   true,
-                                                   false,
-                                                   &error_msg));
-  ASSERT_TRUE(map.get() != nullptr) << error_msg;
+  MemMap map = MemMap::MapAnonymous("MapAnonymousEmpty",
+                                    /* addr */ nullptr,
+                                    0,
+                                    PROT_READ,
+                                    /* low_4gb */ true,
+                                    /* reuse */ false,
+                                    &error_msg);
+  ASSERT_FALSE(map.IsValid()) << error_msg;
+  ASSERT_FALSE(error_msg.empty());
+
+  error_msg.clear();
+  map = MemMap::MapAnonymous("MapAnonymousNonEmpty",
+                             /* addr */ nullptr,
+                             kPageSize,
+                             PROT_READ | PROT_WRITE,
+                             /* low_4gb */ true,
+                             /* reuse */ false,
+                             &error_msg);
+  ASSERT_TRUE(map.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
-  ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
+  ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
 }
 TEST_F(MemMapTest, MapFile32Bit) {
   CommonInit();
@@ -402,18 +409,18 @@
   constexpr size_t kMapSize = kPageSize;
   std::unique_ptr<uint8_t[]> data(new uint8_t[kMapSize]());
   ASSERT_TRUE(scratch_file.GetFile()->WriteFully(&data[0], kMapSize));
-  std::unique_ptr<MemMap> map(MemMap::MapFile(/*byte_count*/kMapSize,
-                                              PROT_READ,
-                                              MAP_PRIVATE,
-                                              scratch_file.GetFd(),
-                                              /*start*/0,
-                                              /*low_4gb*/true,
-                                              scratch_file.GetFilename().c_str(),
-                                              &error_msg));
-  ASSERT_TRUE(map != nullptr) << error_msg;
+  MemMap map = MemMap::MapFile(/*byte_count*/kMapSize,
+                               PROT_READ,
+                               MAP_PRIVATE,
+                               scratch_file.GetFd(),
+                               /*start*/0,
+                               /*low_4gb*/true,
+                               scratch_file.GetFilename().c_str(),
+                               &error_msg);
+  ASSERT_TRUE(map.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
-  ASSERT_EQ(map->Size(), kMapSize);
-  ASSERT_LT(reinterpret_cast<uintptr_t>(BaseBegin(map.get())), 1ULL << 32);
+  ASSERT_EQ(map.Size(), kMapSize);
+  ASSERT_LT(reinterpret_cast<uintptr_t>(map.BaseBegin()), 1ULL << 32);
 }
 #endif
 
@@ -423,36 +430,36 @@
   // Find a valid address.
   uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb*/false);
   // Map at an address that should work, which should succeed.
-  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
-                                                    valid_address,
-                                                    kPageSize,
-                                                    PROT_READ | PROT_WRITE,
-                                                    false,
-                                                    false,
-                                                    &error_msg));
-  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
+  MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
+                                     valid_address,
+                                     kPageSize,
+                                     PROT_READ | PROT_WRITE,
+                                     /* low_4gb */ false,
+                                     /* reuse */ false,
+                                     &error_msg);
+  ASSERT_TRUE(map0.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
-  ASSERT_TRUE(map0->BaseBegin() == valid_address);
+  ASSERT_TRUE(map0.BaseBegin() == valid_address);
   // Map at an unspecified address, which should succeed.
-  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
-                                                    nullptr,
-                                                    kPageSize,
-                                                    PROT_READ | PROT_WRITE,
-                                                    false,
-                                                    false,
-                                                    &error_msg));
-  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
+  MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
+                                     /* addr */ nullptr,
+                                     kPageSize,
+                                     PROT_READ | PROT_WRITE,
+                                     /* low_4gb */ false,
+                                     /* reuse */ false,
+                                     &error_msg);
+  ASSERT_TRUE(map1.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
-  ASSERT_TRUE(map1->BaseBegin() != nullptr);
+  ASSERT_TRUE(map1.BaseBegin() != nullptr);
   // Attempt to map at the same address, which should fail.
-  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
-                                                    reinterpret_cast<uint8_t*>(map1->BaseBegin()),
-                                                    kPageSize,
-                                                    PROT_READ | PROT_WRITE,
-                                                    false,
-                                                    false,
-                                                    &error_msg));
-  ASSERT_TRUE(map2.get() == nullptr) << error_msg;
+  MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
+                                     reinterpret_cast<uint8_t*>(map1.BaseBegin()),
+                                     kPageSize,
+                                     PROT_READ | PROT_WRITE,
+                                     /* low_4gb */ false,
+                                     /* reuse */ false,
+                                     &error_msg);
+  ASSERT_FALSE(map2.IsValid()) << error_msg;
   ASSERT_TRUE(!error_msg.empty());
 }
 
@@ -480,23 +487,23 @@
   // Try all addresses starting from 2GB to 4GB.
   size_t start_addr = 2 * GB;
   std::string error_msg;
-  std::unique_ptr<MemMap> map;
+  MemMap map;
   for (; start_addr <= std::numeric_limits<uint32_t>::max() - size; start_addr += size) {
-    map.reset(MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
-                                   reinterpret_cast<uint8_t*>(start_addr),
-                                   size,
-                                   PROT_READ | PROT_WRITE,
-                                   /*low_4gb*/true,
-                                   false,
-                                   &error_msg));
-    if (map != nullptr) {
+    map = MemMap::MapAnonymous("MapAnonymousExactAddr32bitHighAddr",
+                               reinterpret_cast<uint8_t*>(start_addr),
+                               size,
+                               PROT_READ | PROT_WRITE,
+                               /*low_4gb*/ true,
+                               /* reuse */ false,
+                               &error_msg);
+    if (map.IsValid()) {
       break;
     }
   }
-  ASSERT_TRUE(map.get() != nullptr) << error_msg;
-  ASSERT_GE(reinterpret_cast<uintptr_t>(map->End()), 2u * GB);
+  ASSERT_TRUE(map.IsValid()) << error_msg;
+  ASSERT_GE(reinterpret_cast<uintptr_t>(map.End()), 2u * GB);
   ASSERT_TRUE(error_msg.empty());
-  ASSERT_EQ(BaseBegin(map.get()), reinterpret_cast<void*>(start_addr));
+  ASSERT_EQ(map.BaseBegin(), reinterpret_cast<void*>(start_addr));
 }
 
 TEST_F(MemMapTest, MapAnonymousOverflow) {
@@ -504,14 +511,14 @@
   std::string error_msg;
   uintptr_t ptr = 0;
   ptr -= kPageSize;  // Now it's close to the top.
-  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousOverflow",
-                                                   reinterpret_cast<uint8_t*>(ptr),
-                                                   2 * kPageSize,  // brings it over the top.
-                                                   PROT_READ | PROT_WRITE,
-                                                   false,
-                                                   false,
-                                                   &error_msg));
-  ASSERT_EQ(nullptr, map.get());
+  MemMap map = MemMap::MapAnonymous("MapAnonymousOverflow",
+                                    reinterpret_cast<uint8_t*>(ptr),
+                                    2 * kPageSize,  // brings it over the top.
+                                    PROT_READ | PROT_WRITE,
+                                    /* low_4gb */ false,
+                                    /* reuse */ false,
+                                    &error_msg);
+  ASSERT_FALSE(map.IsValid());
   ASSERT_FALSE(error_msg.empty());
 }
 
@@ -519,29 +526,29 @@
 TEST_F(MemMapTest, MapAnonymousLow4GBExpectedTooHigh) {
   CommonInit();
   std::string error_msg;
-  std::unique_ptr<MemMap> map(
+  MemMap map =
       MemMap::MapAnonymous("MapAnonymousLow4GBExpectedTooHigh",
                            reinterpret_cast<uint8_t*>(UINT64_C(0x100000000)),
                            kPageSize,
                            PROT_READ | PROT_WRITE,
-                           true,
-                           false,
-                           &error_msg));
-  ASSERT_EQ(nullptr, map.get());
+                           /* low_4gb */ true,
+                           /* reuse */ false,
+                           &error_msg);
+  ASSERT_FALSE(map.IsValid());
   ASSERT_FALSE(error_msg.empty());
 }
 
 TEST_F(MemMapTest, MapAnonymousLow4GBRangeTooHigh) {
   CommonInit();
   std::string error_msg;
-  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
-                                                   reinterpret_cast<uint8_t*>(0xF0000000),
-                                                   0x20000000,
-                                                   PROT_READ | PROT_WRITE,
-                                                   true,
-                                                   false,
-                                                   &error_msg));
-  ASSERT_EQ(nullptr, map.get());
+  MemMap map = MemMap::MapAnonymous("MapAnonymousLow4GBRangeTooHigh",
+                                    reinterpret_cast<uint8_t*>(0xF0000000),
+                                    0x20000000,
+                                    PROT_READ | PROT_WRITE,
+                                    /* low_4gb */ true,
+                                    /* reuse */ false,
+                                    &error_msg);
+  ASSERT_FALSE(map.IsValid());
   ASSERT_FALSE(error_msg.empty());
 }
 #endif
@@ -549,23 +556,23 @@
 TEST_F(MemMapTest, MapAnonymousReuse) {
   CommonInit();
   std::string error_msg;
-  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymousReserve",
-                                                   nullptr,
-                                                   0x20000,
-                                                   PROT_READ | PROT_WRITE,
-                                                   false,
-                                                   false,
-                                                   &error_msg));
-  ASSERT_NE(nullptr, map.get());
+  MemMap map = MemMap::MapAnonymous("MapAnonymousReserve",
+                                    /* addr */ nullptr,
+                                    0x20000,
+                                    PROT_READ | PROT_WRITE,
+                                    /* low_4gb */ false,
+                                    /* reuse */ false,
+                                    &error_msg);
+  ASSERT_TRUE(map.IsValid());
   ASSERT_TRUE(error_msg.empty());
-  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymousReused",
-                                                    reinterpret_cast<uint8_t*>(map->BaseBegin()),
-                                                    0x10000,
-                                                    PROT_READ | PROT_WRITE,
-                                                    false,
-                                                    true,
-                                                    &error_msg));
-  ASSERT_NE(nullptr, map2.get());
+  MemMap map2 = MemMap::MapAnonymous("MapAnonymousReused",
+                                     reinterpret_cast<uint8_t*>(map.BaseBegin()),
+                                     0x10000,
+                                     PROT_READ | PROT_WRITE,
+                                     /* low_4gb */ false,
+                                     /* reuse */ true,
+                                     &error_msg);
+  ASSERT_TRUE(map2.IsValid());
   ASSERT_TRUE(error_msg.empty());
 }
 
@@ -574,65 +581,65 @@
   std::string error_msg;
   constexpr size_t kNumPages = 3;
   // Map a 3-page mem map.
-  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymous0",
-                                                   nullptr,
-                                                   kPageSize * kNumPages,
-                                                   PROT_READ | PROT_WRITE,
-                                                   false,
-                                                   false,
-                                                   &error_msg));
-  ASSERT_TRUE(map.get() != nullptr) << error_msg;
+  MemMap map = MemMap::MapAnonymous("MapAnonymous0",
+                                    /* addr */ nullptr,
+                                    kPageSize * kNumPages,
+                                    PROT_READ | PROT_WRITE,
+                                    /* low_4gb */ false,
+                                    /* reuse */ false,
+                                    &error_msg);
+  ASSERT_TRUE(map.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
   // Record the base address.
-  uint8_t* map_base = reinterpret_cast<uint8_t*>(map->BaseBegin());
+  uint8_t* map_base = reinterpret_cast<uint8_t*>(map.BaseBegin());
   // Unmap it.
-  map.reset();
+  map.Reset();
 
   // Map at the same address, but in page-sized separate mem maps,
   // assuming the space at the address is still available.
-  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
-                                                    map_base,
-                                                    kPageSize,
-                                                    PROT_READ | PROT_WRITE,
-                                                    false,
-                                                    false,
-                                                    &error_msg));
-  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
+  MemMap map0 = MemMap::MapAnonymous("MapAnonymous0",
+                                     map_base,
+                                     kPageSize,
+                                     PROT_READ | PROT_WRITE,
+                                     /* low_4gb */ false,
+                                     /* reuse */ false,
+                                     &error_msg);
+  ASSERT_TRUE(map0.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
-  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
-                                                    map_base + kPageSize,
-                                                    kPageSize,
-                                                    PROT_READ | PROT_WRITE,
-                                                    false,
-                                                    false,
-                                                    &error_msg));
-  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
+  MemMap map1 = MemMap::MapAnonymous("MapAnonymous1",
+                                     map_base + kPageSize,
+                                     kPageSize,
+                                     PROT_READ | PROT_WRITE,
+                                     /* low_4gb */ false,
+                                     /* reuse */ false,
+                                     &error_msg);
+  ASSERT_TRUE(map1.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
-  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
-                                                    map_base + kPageSize * 2,
-                                                    kPageSize,
-                                                    PROT_READ | PROT_WRITE,
-                                                    false,
-                                                    false,
-                                                    &error_msg));
-  ASSERT_TRUE(map2.get() != nullptr) << error_msg;
+  MemMap map2 = MemMap::MapAnonymous("MapAnonymous2",
+                                     map_base + kPageSize * 2,
+                                     kPageSize,
+                                     PROT_READ | PROT_WRITE,
+                                     /* low_4gb */ false,
+                                     /* reuse */ false,
+                                     &error_msg);
+  ASSERT_TRUE(map2.IsValid()) << error_msg;
   ASSERT_TRUE(error_msg.empty());
 
   // One-map cases.
-  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map0.get()));
-  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map1.get()));
-  ASSERT_TRUE(MemMap::CheckNoGaps(map2.get(), map2.get()));
+  ASSERT_TRUE(MemMap::CheckNoGaps(map0, map0));
+  ASSERT_TRUE(MemMap::CheckNoGaps(map1, map1));
+  ASSERT_TRUE(MemMap::CheckNoGaps(map2, map2));
 
   // Two or three-map cases.
-  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map1.get()));
-  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map2.get()));
-  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map2.get()));
+  ASSERT_TRUE(MemMap::CheckNoGaps(map0, map1));
+  ASSERT_TRUE(MemMap::CheckNoGaps(map1, map2));
+  ASSERT_TRUE(MemMap::CheckNoGaps(map0, map2));
 
   // Unmap the middle one.
-  map1.reset();
+  map1.Reset();
 
   // Should return false now that there's a gap in the middle.
-  ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
+  ASSERT_FALSE(MemMap::CheckNoGaps(map0, map2));
 }
 
 TEST_F(MemMapTest, AlignBy) {
@@ -641,52 +648,53 @@
   // Cast the page size to size_t.
   const size_t page_size = static_cast<size_t>(kPageSize);
   // Map a region.
-  std::unique_ptr<MemMap> m0(MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
-                                                  nullptr,
-                                                  14 * page_size,
-                                                  PROT_READ | PROT_WRITE,
-                                                  false,
-                                                  false,
-                                                  &error_msg));
-  uint8_t* base0 = m0->Begin();
+  MemMap m0 = MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
+                                   /* addr */ nullptr,
+                                   14 * page_size,
+                                   PROT_READ | PROT_WRITE,
+                                   /* low_4gb */ false,
+                                   /* reuse */ false,
+                                   &error_msg);
+  ASSERT_TRUE(m0.IsValid());
+  uint8_t* base0 = m0.Begin();
   ASSERT_TRUE(base0 != nullptr) << error_msg;
-  ASSERT_EQ(m0->Size(), 14 * page_size);
-  ASSERT_EQ(BaseBegin(m0.get()), base0);
-  ASSERT_EQ(BaseSize(m0.get()), m0->Size());
+  ASSERT_EQ(m0.Size(), 14 * page_size);
+  ASSERT_EQ(m0.BaseBegin(), base0);
+  ASSERT_EQ(m0.BaseSize(), m0.Size());
 
   // Break it into several regions by using RemapAtEnd.
-  std::unique_ptr<MemMap> m1(m0->RemapAtEnd(base0 + 3 * page_size,
-                                            "MemMapTest_AlignByTest_map1",
-                                            PROT_READ | PROT_WRITE,
-                                            &error_msg));
-  uint8_t* base1 = m1->Begin();
+  MemMap m1 = m0.RemapAtEnd(base0 + 3 * page_size,
+                            "MemMapTest_AlignByTest_map1",
+                            PROT_READ | PROT_WRITE,
+                            &error_msg);
+  uint8_t* base1 = m1.Begin();
   ASSERT_TRUE(base1 != nullptr) << error_msg;
   ASSERT_EQ(base1, base0 + 3 * page_size);
-  ASSERT_EQ(m0->Size(), 3 * page_size);
+  ASSERT_EQ(m0.Size(), 3 * page_size);
 
-  std::unique_ptr<MemMap> m2(m1->RemapAtEnd(base1 + 4 * page_size,
-                                            "MemMapTest_AlignByTest_map2",
-                                            PROT_READ | PROT_WRITE,
-                                            &error_msg));
-  uint8_t* base2 = m2->Begin();
+  MemMap m2 = m1.RemapAtEnd(base1 + 4 * page_size,
+                            "MemMapTest_AlignByTest_map2",
+                            PROT_READ | PROT_WRITE,
+                            &error_msg);
+  uint8_t* base2 = m2.Begin();
   ASSERT_TRUE(base2 != nullptr) << error_msg;
   ASSERT_EQ(base2, base1 + 4 * page_size);
-  ASSERT_EQ(m1->Size(), 4 * page_size);
+  ASSERT_EQ(m1.Size(), 4 * page_size);
 
-  std::unique_ptr<MemMap> m3(m2->RemapAtEnd(base2 + 3 * page_size,
-                                            "MemMapTest_AlignByTest_map1",
-                                            PROT_READ | PROT_WRITE,
-                                            &error_msg));
-  uint8_t* base3 = m3->Begin();
+  MemMap m3 = m2.RemapAtEnd(base2 + 3 * page_size,
+                            "MemMapTest_AlignByTest_map1",
+                            PROT_READ | PROT_WRITE,
+                            &error_msg);
+  uint8_t* base3 = m3.Begin();
   ASSERT_TRUE(base3 != nullptr) << error_msg;
   ASSERT_EQ(base3, base2 + 3 * page_size);
-  ASSERT_EQ(m2->Size(), 3 * page_size);
-  ASSERT_EQ(m3->Size(), 4 * page_size);
+  ASSERT_EQ(m2.Size(), 3 * page_size);
+  ASSERT_EQ(m3.Size(), 4 * page_size);
 
-  uint8_t* end0 = base0 + m0->Size();
-  uint8_t* end1 = base1 + m1->Size();
-  uint8_t* end2 = base2 + m2->Size();
-  uint8_t* end3 = base3 + m3->Size();
+  uint8_t* end0 = base0 + m0.Size();
+  uint8_t* end1 = base1 + m1.Size();
+  uint8_t* end2 = base2 + m2.Size();
+  uint8_t* end3 = base3 + m3.Size();
 
   ASSERT_EQ(static_cast<size_t>(end3 - base0), 14 * page_size);
 
@@ -703,39 +711,39 @@
   }
 
   // Align by 2 * page_size;
-  m0->AlignBy(2 * page_size);
-  m1->AlignBy(2 * page_size);
-  m2->AlignBy(2 * page_size);
-  m3->AlignBy(2 * page_size);
+  m0.AlignBy(2 * page_size);
+  m1.AlignBy(2 * page_size);
+  m2.AlignBy(2 * page_size);
+  m3.AlignBy(2 * page_size);
 
-  EXPECT_TRUE(IsAlignedParam(m0->Begin(), 2 * page_size));
-  EXPECT_TRUE(IsAlignedParam(m1->Begin(), 2 * page_size));
-  EXPECT_TRUE(IsAlignedParam(m2->Begin(), 2 * page_size));
-  EXPECT_TRUE(IsAlignedParam(m3->Begin(), 2 * page_size));
+  EXPECT_TRUE(IsAlignedParam(m0.Begin(), 2 * page_size));
+  EXPECT_TRUE(IsAlignedParam(m1.Begin(), 2 * page_size));
+  EXPECT_TRUE(IsAlignedParam(m2.Begin(), 2 * page_size));
+  EXPECT_TRUE(IsAlignedParam(m3.Begin(), 2 * page_size));
 
-  EXPECT_TRUE(IsAlignedParam(m0->Begin() + m0->Size(), 2 * page_size));
-  EXPECT_TRUE(IsAlignedParam(m1->Begin() + m1->Size(), 2 * page_size));
-  EXPECT_TRUE(IsAlignedParam(m2->Begin() + m2->Size(), 2 * page_size));
-  EXPECT_TRUE(IsAlignedParam(m3->Begin() + m3->Size(), 2 * page_size));
+  EXPECT_TRUE(IsAlignedParam(m0.Begin() + m0.Size(), 2 * page_size));
+  EXPECT_TRUE(IsAlignedParam(m1.Begin() + m1.Size(), 2 * page_size));
+  EXPECT_TRUE(IsAlignedParam(m2.Begin() + m2.Size(), 2 * page_size));
+  EXPECT_TRUE(IsAlignedParam(m3.Begin() + m3.Size(), 2 * page_size));
 
   if (IsAlignedParam(base0, 2 * page_size)) {
-    EXPECT_EQ(m0->Begin(), base0);
-    EXPECT_EQ(m0->Begin() + m0->Size(), end0 - page_size);
-    EXPECT_EQ(m1->Begin(), base1 + page_size);
-    EXPECT_EQ(m1->Begin() + m1->Size(), end1 - page_size);
-    EXPECT_EQ(m2->Begin(), base2 + page_size);
-    EXPECT_EQ(m2->Begin() + m2->Size(), end2);
-    EXPECT_EQ(m3->Begin(), base3);
-    EXPECT_EQ(m3->Begin() + m3->Size(), end3);
+    EXPECT_EQ(m0.Begin(), base0);
+    EXPECT_EQ(m0.Begin() + m0.Size(), end0 - page_size);
+    EXPECT_EQ(m1.Begin(), base1 + page_size);
+    EXPECT_EQ(m1.Begin() + m1.Size(), end1 - page_size);
+    EXPECT_EQ(m2.Begin(), base2 + page_size);
+    EXPECT_EQ(m2.Begin() + m2.Size(), end2);
+    EXPECT_EQ(m3.Begin(), base3);
+    EXPECT_EQ(m3.Begin() + m3.Size(), end3);
   } else {
-    EXPECT_EQ(m0->Begin(), base0 + page_size);
-    EXPECT_EQ(m0->Begin() + m0->Size(), end0);
-    EXPECT_EQ(m1->Begin(), base1);
-    EXPECT_EQ(m1->Begin() + m1->Size(), end1);
-    EXPECT_EQ(m2->Begin(), base2);
-    EXPECT_EQ(m2->Begin() + m2->Size(), end2 - page_size);
-    EXPECT_EQ(m3->Begin(), base3 + page_size);
-    EXPECT_EQ(m3->Begin() + m3->Size(), end3 - page_size);
+    EXPECT_EQ(m0.Begin(), base0 + page_size);
+    EXPECT_EQ(m0.Begin() + m0.Size(), end0);
+    EXPECT_EQ(m1.Begin(), base1);
+    EXPECT_EQ(m1.Begin() + m1.Size(), end1);
+    EXPECT_EQ(m2.Begin(), base2);
+    EXPECT_EQ(m2.Begin() + m2.Size(), end2 - page_size);
+    EXPECT_EQ(m3.Begin(), base3 + page_size);
+    EXPECT_EQ(m3.Begin() + m3.Size(), end3 - page_size);
   }
 }
 
diff --git a/libartbase/base/zip_archive.cc b/libartbase/base/zip_archive.cc
index b5f946e..3c68ca1 100644
--- a/libartbase/base/zip_archive.cc
+++ b/libartbase/base/zip_archive.cc
@@ -68,31 +68,34 @@
   return true;
 }
 
-MemMap* ZipEntry::ExtractToMemMap(const char* zip_filename, const char* entry_filename,
-                                  std::string* error_msg) {
+MemMap ZipEntry::ExtractToMemMap(const char* zip_filename,
+                                 const char* entry_filename,
+                                 std::string* error_msg) {
   std::string name(entry_filename);
   name += " extracted in memory from ";
   name += zip_filename;
-  std::unique_ptr<MemMap> map(MemMap::MapAnonymous(name.c_str(),
-                                                   nullptr, GetUncompressedLength(),
-                                                   PROT_READ | PROT_WRITE, false, false,
-                                                   error_msg));
-  if (map.get() == nullptr) {
+  MemMap map = MemMap::MapAnonymous(name.c_str(),
+                                    /* addr */ nullptr,
+                                    GetUncompressedLength(),
+                                    PROT_READ | PROT_WRITE,
+                                    /* low_4gb */ false,
+                                    /* reuse */ false,
+                                    error_msg);
+  if (!map.IsValid()) {
     DCHECK(!error_msg->empty());
-    return nullptr;
+    return MemMap::Invalid();
   }
 
-  const int32_t error = ExtractToMemory(handle_, zip_entry_,
-                                        map->Begin(), map->Size());
+  const int32_t error = ExtractToMemory(handle_, zip_entry_, map.Begin(), map.Size());
   if (error) {
     *error_msg = std::string(ErrorCodeString(error));
-    return nullptr;
+    return MemMap::Invalid();
   }
 
-  return map.release();
+  return map;
 }
 
-MemMap* ZipEntry::MapDirectlyFromFile(const char* zip_filename, std::string* error_msg) {
+MemMap ZipEntry::MapDirectlyFromFile(const char* zip_filename, std::string* error_msg) {
   const int zip_fd = GetFileDescriptor(handle_);
   const char* entry_filename = entry_name_.c_str();
 
@@ -109,7 +112,7 @@
     *error_msg = StringPrintf("Cannot map '%s' (in zip '%s') directly because it is compressed.",
                               entry_filename,
                               zip_filename);
-    return nullptr;
+    return MemMap::Invalid();
   } else if (zip_entry_->uncompressed_length != zip_entry_->compressed_length) {
     *error_msg = StringPrintf("Cannot map '%s' (in zip '%s') directly because "
                               "entry has bad size (%u != %u).",
@@ -117,7 +120,7 @@
                               zip_filename,
                               zip_entry_->uncompressed_length,
                               zip_entry_->compressed_length);
-    return nullptr;
+    return MemMap::Invalid();
   }
 
   std::string name(entry_filename);
@@ -130,7 +133,7 @@
     LOG(INFO) << "zip_archive: " << "make mmap of " << name << " @ offset = " << offset;
   }
 
-  std::unique_ptr<MemMap> map(
+  MemMap map =
       MemMap::MapFileAtAddress(nullptr,  // Expected pointer address
                                GetUncompressedLength(),  // Byte count
                                PROT_READ | PROT_WRITE,
@@ -140,9 +143,9 @@
                                false,  // Don't restrict allocation to lower4GB
                                false,  // Doesn't overlap existing map (reuse=false)
                                name.c_str(),
-                               /*out*/error_msg));
+                               /*out*/error_msg);
 
-  if (map == nullptr) {
+  if (!map.IsValid()) {
     DCHECK(!error_msg->empty());
   }
 
@@ -169,12 +172,12 @@
     LOG(INFO) << "---------------------------";
 
     // Dump map contents.
-    if (map != nullptr) {
+    if (map.IsValid()) {
       tmp = "";
 
       count = kMaxDumpChars;
 
-      uint8_t* begin = map->Begin();
+      uint8_t* begin = map.Begin();
       for (i = 0; i < count; ++i) {
         tmp += StringPrintf("%3d ", (unsigned int)begin[i]);
       }
@@ -185,19 +188,20 @@
     }
   }
 
-  return map.release();
+  return map;
 }
 
-MemMap* ZipEntry::MapDirectlyOrExtract(const char* zip_filename,
-                                       const char* entry_filename,
-                                       std::string* error_msg) {
+MemMap ZipEntry::MapDirectlyOrExtract(const char* zip_filename,
+                                      const char* entry_filename,
+                                      std::string* error_msg) {
   if (IsUncompressed() && GetFileDescriptor(handle_) >= 0) {
-    MemMap* ret = MapDirectlyFromFile(zip_filename, error_msg);
-    if (ret != nullptr) {
+    std::string local_error_msg;
+    MemMap ret = MapDirectlyFromFile(zip_filename, &local_error_msg);
+    if (ret.IsValid()) {
       return ret;
     }
+    // Fall back to extraction for the failure case.
   }
-  // Fall back to extraction for the failure case.
   return ExtractToMemMap(zip_filename, entry_filename, error_msg);
 }
 
diff --git a/libartbase/base/zip_archive.h b/libartbase/base/zip_archive.h
index 73495da..8fc8b54 100644
--- a/libartbase/base/zip_archive.h
+++ b/libartbase/base/zip_archive.h
@@ -43,21 +43,22 @@
   bool ExtractToFile(File& file, std::string* error_msg);
   // Extract this entry to anonymous memory (R/W).
   // Returns null on failure and sets error_msg.
-  MemMap* ExtractToMemMap(const char* zip_filename, const char* entry_filename,
-                          std::string* error_msg);
+  MemMap ExtractToMemMap(const char* zip_filename,
+                         const char* entry_filename,
+                         std::string* error_msg);
   // Create a file-backed private (clean, R/W) memory mapping to this entry.
   // 'zip_filename' is used for diagnostics only,
   //   the original file that the ZipArchive was open with is used
   //   for the mapping.
   //
   // Will only succeed if the entry is stored uncompressed.
-  // Returns null on failure and sets error_msg.
-  MemMap* MapDirectlyFromFile(const char* zip_filename, /*out*/std::string* error_msg);
+  // Returns invalid MemMap on failure and sets error_msg.
+  MemMap MapDirectlyFromFile(const char* zip_filename, /*out*/std::string* error_msg);
   virtual ~ZipEntry();
 
-  MemMap* MapDirectlyOrExtract(const char* zip_filename,
-                               const char* entry_filename,
-                               std::string* error_msg);
+  MemMap MapDirectlyOrExtract(const char* zip_filename,
+                              const char* entry_filename,
+                              std::string* error_msg);
 
   uint32_t GetUncompressedLength();
   uint32_t GetCrc32();