Fix a double unmap issue in MemMap::UnMapAtEnd().

MemMap::UnMapAtEnd() unmaps the unused tail of the alloc space during
a zygote fork. But it can cause the same tail region of memory to be
unmapped twice: once in UnMapAtEnd() and once more in ~MemMap() during
a shutdown.

I encountered a crash caused by this issue in SpaceTest.ZygoteTest
(which happens to manifest only on a device, in a branch with the
rosalloc change, probably due to some randomness in mmap address
choice, etc.)

Here's what happens:

1) CreateZygoteSpace() calls UnMapAtEnd(), which unmaps the unused
tail of the alloc space.

2) In the same function, after UnMapAtEnd(), several libc new/malloc
allocations occur, including one for a new DlMallocSpace object. This
happens to cause libc to map a new memory region that overlaps with
the region just unmapped in 1) and to satisfy those allocations from
it (that is, the new DlMallocSpace object is allocated in that memory
region). This second DlMallocSpace becomes the new alloc space after
the zygote fork; the first DlMallocSpace becomes the zygote space.
Note that libc maps that memory region before the underlying memory
of the second DlMallocSpace is mapped.

3) During a Runtime shutdown (which happens once for a normal VM
shutdown, or at the end of each test run) all the spaces get
destructed one by one, including the two DlMallocSpaces. When the
first DlMallocSpace gets destructed (note the space list is sorted
by address), the ~MemMap() destructor, run as part of destroying the
space, unmaps the original memory region whose tail was already
unmapped in 1) and reused by libc in 2). That region now contains
the libc memory that holds the second DlMallocSpace object.

4) When the runtime then attempts to destruct the second
DlMallocSpace object, the memory in which that object resides has
already been unmapped in 3), which causes a SIGSEGV.
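
The following standalone sketch (not ART code; a hypothetical POSIX
program with error checking omitted) illustrates the same double-unmap
hazard. Running it would fault at the marked line:

  #include <cstdio>
  #include <cstring>
  #include <sys/mman.h>
  #include <unistd.h>

  int main() {
    // Map a four-page region standing in for the original alloc space.
    const size_t page = static_cast<size_t>(sysconf(_SC_PAGESIZE));
    const size_t full_size = 4 * page;
    char* base = static_cast<char*>(mmap(NULL, full_size, PROT_READ | PROT_WRITE,
                                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
    // "UnMapAtEnd": drop the unused tail, but the original full_size is
    // still remembered elsewhere (in ART, in the const base_size_).
    munmap(base + page, full_size - page);
    // Something else reuses the freed range (here an explicit MAP_FIXED
    // mapping; in ART, a libc malloc region holding the new DlMallocSpace).
    char* reused = static_cast<char*>(mmap(base + page, page, PROT_READ | PROT_WRITE,
                                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
                                           -1, 0));
    strcpy(reused, "second DlMallocSpace");
    // "~MemMap()" of the first space: unmapping with the stale full size
    // also tears down the unrelated mapping that moved into the tail.
    munmap(base, full_size);
    printf("%s\n", reused);  // SIGSEGV: reused now points into unmapped memory.
    return 0;
  }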

This change replaces UnMapAtEnd() with a new function, RemapAtEnd(),
which combines unmapping the tail region and remapping it, in order
to achieve two things:

1) It fixes the double unmap issue by updating the base_size_ member
variable to exclude the already-unmapped tail region, so that
~MemMap() will not unmap that region again.

2) It mitigates the non-atomic unmap/map sequence in
CreateZygoteSpace(). Once the unused tail of the original alloc
space's memory region is unmapped, something like libc could come
along and take that memory region before it is mapped again for the
new alloc space. That would leave a hole between the old alloc (new
zygote) space and the new alloc space and make the two spaces
non-contiguous. RemapAtEnd() eliminates new/malloc allocations
between the unmap and the map calls. But note this still isn't
perfect, as other threads could in theory take the memory region
between the munmap and the mmap calls.
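
For reference, the intended call pattern is sketched below (simplified
from the CreateZygoteSpace() change in the diff; the tail_map variable
name is illustrative only):

  std::string error_msg;
  // Shrink the existing map to end at end_ and get back a new MemMap
  // covering what used to be its tail.
  UniquePtr<MemMap> tail_map(GetMemMap()->RemapAtEnd(end_, alloc_space_name,
                                                     PROT_READ | PROT_WRITE,
                                                     &error_msg));
  CHECK(tail_map.get() != nullptr) << error_msg;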

Added tests.

Change-Id: I43bc3a33a2cbfc7a092890312e34aa5285384589
diff --git a/runtime/gc/space/dlmalloc_space.cc b/runtime/gc/space/dlmalloc_space.cc
index 8c13d79..9ebc16a 100644
--- a/runtime/gc/space/dlmalloc_space.cc
+++ b/runtime/gc/space/dlmalloc_space.cc
@@ -287,8 +287,6 @@
   size_t size = RoundUp(Size(), kPageSize);
   // Trim the heap so that we minimize the size of the Zygote space.
   Trim();
-  // Trim our mem-map to free unused pages.
-  GetMemMap()->UnMapAtEnd(end_);
   // TODO: Not hardcode these in?
   const size_t starting_size = kPageSize;
   const size_t initial_size = 2 * MB;
@@ -308,9 +306,10 @@
   VLOG(heap) << "Size " << GetMemMap()->Size();
   VLOG(heap) << "GrowthLimit " << PrettySize(growth_limit);
   VLOG(heap) << "Capacity " << PrettySize(capacity);
+  // Remap the tail.
   std::string error_msg;
-  UniquePtr<MemMap> mem_map(MemMap::MapAnonymous(alloc_space_name, End(), capacity,
-                                                 PROT_READ | PROT_WRITE, &error_msg));
+  UniquePtr<MemMap> mem_map(GetMemMap()->RemapAtEnd(end_, alloc_space_name,
+                                                    PROT_READ | PROT_WRITE, &error_msg));
   CHECK(mem_map.get() != nullptr) << error_msg;
   void* mspace = CreateMallocSpace(end_, starting_size, initial_size);
   // Protect memory beyond the initial size.
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 00316f7..70d3457 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -170,12 +170,73 @@
   }
 };
 
-void MemMap::UnMapAtEnd(byte* new_end) {
+MemMap* MemMap::RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
+                           std::string* error_msg) {
   DCHECK_GE(new_end, Begin());
   DCHECK_LE(new_end, End());
-  size_t unmap_size = End() - new_end;
-  munmap(new_end, unmap_size);
-  size_ -= unmap_size;
+  DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
+  DCHECK(IsAligned<kPageSize>(begin_));
+  DCHECK(IsAligned<kPageSize>(base_begin_));
+  DCHECK(IsAligned<kPageSize>(reinterpret_cast<byte*>(base_begin_) + base_size_));
+  DCHECK(IsAligned<kPageSize>(new_end));
+  byte* old_end = begin_ + size_;
+  byte* old_base_end = reinterpret_cast<byte*>(base_begin_) + base_size_;
+  byte* new_base_end = new_end;
+  DCHECK_LE(new_base_end, old_base_end);
+  if (new_base_end == old_base_end) {
+    return new MemMap(tail_name, NULL, 0, NULL, 0, tail_prot);
+  }
+  size_ = new_end - reinterpret_cast<byte*>(begin_);
+  base_size_ = new_base_end - reinterpret_cast<byte*>(base_begin_);
+  DCHECK_LE(begin_ + size_, reinterpret_cast<byte*>(base_begin_) + base_size_);
+  size_t tail_size = old_end - new_end;
+  byte* tail_base_begin = new_base_end;
+  size_t tail_base_size = old_base_end - new_base_end;
+  DCHECK_EQ(tail_base_begin + tail_base_size, old_base_end);
+  DCHECK(IsAligned<kPageSize>(tail_base_size));
+
+#ifdef USE_ASHMEM
+  // android_os_Debug.cpp read_mapinfo assumes all ashmem regions associated with the VM are
+  // prefixed "dalvik-".
+  std::string debug_friendly_name("dalvik-");
+  debug_friendly_name += tail_name;
+  ScopedFd fd(ashmem_create_region(debug_friendly_name.c_str(), tail_base_size));
+  int flags = MAP_PRIVATE;
+  if (fd.get() == -1) {
+    *error_msg = StringPrintf("ashmem_create_region failed for '%s': %s",
+                              tail_name, strerror(errno));
+    return nullptr;
+  }
+#else
+  ScopedFd fd(-1);
+  int flags = MAP_PRIVATE | MAP_ANONYMOUS;
+#endif
+
+  // Unmap/map the tail region.
+  int result = munmap(tail_base_begin, tail_base_size);
+  if (result == -1) {
+    std::string maps;
+    ReadFileToString("/proc/self/maps", &maps);
+    *error_msg = StringPrintf("munmap(%p, %zd) failed for '%s'\n%s",
+                              tail_base_begin, tail_base_size, name_.c_str(),
+                              maps.c_str());
+    return nullptr;
+  }
+  // Don't cause memory allocation between the munmap and the mmap
+  // calls. Otherwise, libc (or something else) might take this memory
+  // region. Note this isn't perfect, as there's no way to prevent
+  // other threads from trying to take this memory region here.
+  byte* actual = reinterpret_cast<byte*>(mmap(tail_base_begin, tail_base_size, tail_prot,
+                                              flags, fd.get(), 0));
+  if (actual == MAP_FAILED) {
+    std::string maps;
+    ReadFileToString("/proc/self/maps", &maps);
+    *error_msg = StringPrintf("anonymous mmap(%p, %zd, %x, %x, %d, 0) failed\n%s",
+                              tail_base_begin, tail_base_size, tail_prot, flags, fd.get(),
+                              maps.c_str());
+    return nullptr;
+  }
+  return new MemMap(tail_name, actual, tail_size, actual, tail_base_size, tail_prot);
 }
 
 bool MemMap::Protect(int prot) {
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 919463c..2c65833 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -84,8 +84,9 @@
     return Begin() <= addr && addr < End();
   }
 
-  // Trim by unmapping pages at the end of the map.
-  void UnMapAtEnd(byte* new_end);
+  // Unmap the pages at the end and remap them to create another memory map.
+  MemMap* RemapAtEnd(byte* new_end, const char* tail_name, int tail_prot,
+                     std::string* error_msg);
 
  private:
   MemMap(const std::string& name, byte* begin, size_t size, void* base_begin, size_t base_size,
@@ -96,8 +97,10 @@
   size_t size_;  // Length of data.
 
   void* const base_begin_;  // Page-aligned base address.
-  const size_t base_size_;  // Length of mapping.
+  size_t base_size_;  // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
   int prot_;  // Protection of the map.
+
+  friend class MemMapTest;  // To allow access to base_begin_ and base_size_.
 };
 
 }  // namespace art
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index 09de320..cf2c9d0 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -21,7 +21,15 @@
 
 namespace art {
 
-class MemMapTest : public testing::Test {};
+class MemMapTest : public testing::Test {
+ public:
+  byte* BaseBegin(MemMap* mem_map) {
+    return reinterpret_cast<byte*>(mem_map->base_begin_);
+  }
+  size_t BaseSize(MemMap* mem_map) {
+    return mem_map->base_size_;
+  }
+};
 
 TEST_F(MemMapTest, MapAnonymousEmpty) {
   std::string error_msg;
@@ -34,4 +42,57 @@
   ASSERT_TRUE(error_msg.empty());
 }
 
+TEST_F(MemMapTest, RemapAtEnd) {
+  std::string error_msg;
+  // Cast the page size to size_t.
+  const size_t page_size = static_cast<size_t>(kPageSize);
+  // Map a two-page memory region.
+  MemMap* m0 = MemMap::MapAnonymous("MemMapTest_RemapAtEndTest_map0",
+                                    NULL,
+                                    2 * page_size,
+                                    PROT_READ | PROT_WRITE,
+                                    &error_msg);
+  // Check its state and write to it.
+  byte* base0 = m0->Begin();
+  ASSERT_TRUE(base0 != NULL) << error_msg;
+  size_t size0 = m0->Size();
+  EXPECT_EQ(m0->Size(), 2 * page_size);
+  EXPECT_EQ(BaseBegin(m0), base0);
+  EXPECT_EQ(BaseSize(m0), size0);
+  memset(base0, 42, 2 * page_size);
+  // Remap the latter half into a second MemMap.
+  MemMap* m1 = m0->RemapAtEnd(base0 + page_size,
+                              "MemMapTest_RemapAtEndTest_map1",
+                              PROT_READ | PROT_WRITE,
+                              &error_msg);
+  // Check the states of the two maps.
+  EXPECT_EQ(m0->Begin(), base0) << error_msg;
+  EXPECT_EQ(m0->Size(), page_size);
+  EXPECT_EQ(BaseBegin(m0), base0);
+  EXPECT_EQ(BaseSize(m0), page_size);
+  byte* base1 = m1->Begin();
+  size_t size1 = m1->Size();
+  EXPECT_EQ(base1, base0 + page_size);
+  EXPECT_EQ(size1, page_size);
+  EXPECT_EQ(BaseBegin(m1), base1);
+  EXPECT_EQ(BaseSize(m1), size1);
+  // Write to the second region.
+  memset(base1, 43, page_size);
+  // Check the contents of the two regions.
+  for (size_t i = 0; i < page_size; ++i) {
+    EXPECT_EQ(base0[i], 42);
+  }
+  for (size_t i = 0; i < page_size; ++i) {
+    EXPECT_EQ(base1[i], 43);
+  }
+  // Unmap the first region.
+  delete m0;
+  // Make sure the second region is still accessible after the first
+  // region is unmapped.
+  for (size_t i = 0; i < page_size; ++i) {
+    EXPECT_EQ(base1[i], 43);
+  }
+  delete m1;
+}
+
 }  // namespace art