Verify there's no mem map gap so the immune region doesn't break.

This adds code that verifies that there's no memory map gap between
the image space and the main space so that the immune region
functionality won't silently break. For example, if there's a gap and
a large object is allocated in that gap, the large object is
incorrectly treated as part of the immune region and the marking breaks.

Bug: 14059466
Change-Id: Ie6ed82988d74b6d0562ebbbaac96ee43c15b14a6
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index c108a5f..fe76c92 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -250,4 +250,65 @@
 }
 #endif
 
+TEST_F(MemMapTest, CheckNoGaps) {  // Exercises MemMap::CheckNoGaps() on adjacent page-sized maps.
+  std::string error_msg;
+  constexpr size_t kNumPages = 3;
+  // Map a 3-page mem map.
+  std::unique_ptr<MemMap> map(MemMap::MapAnonymous("MapAnonymous0",
+                                                   nullptr,  // No address hint; kernel picks.
+                                                   kPageSize * kNumPages,
+                                                   PROT_READ | PROT_WRITE,
+                                                   false,  // presumably low_4gb — confirm signature.
+                                                   &error_msg));
+  ASSERT_TRUE(map.get() != nullptr) << error_msg;
+  ASSERT_TRUE(error_msg.empty());
+  // Record the base address so we can re-map the same region below.
+  byte* map_base = reinterpret_cast<byte*>(map->BaseBegin());
+  // Unmap it, freeing the 3-page region.
+  map.reset();
+
+  // Map at the same address, but in page-sized separate mem maps,
+  // assuming the space at the address is still available (racy in theory, fine for a test).
+  std::unique_ptr<MemMap> map0(MemMap::MapAnonymous("MapAnonymous0",
+                                                    map_base,  // Page 0 of the freed region.
+                                                    kPageSize,
+                                                    PROT_READ | PROT_WRITE,
+                                                    false,
+                                                    &error_msg));
+  ASSERT_TRUE(map0.get() != nullptr) << error_msg;
+  ASSERT_TRUE(error_msg.empty());
+  std::unique_ptr<MemMap> map1(MemMap::MapAnonymous("MapAnonymous1",
+                                                    map_base + kPageSize,  // Page 1, adjacent to map0.
+                                                    kPageSize,
+                                                    PROT_READ | PROT_WRITE,
+                                                    false,
+                                                    &error_msg));
+  ASSERT_TRUE(map1.get() != nullptr) << error_msg;
+  ASSERT_TRUE(error_msg.empty());
+  std::unique_ptr<MemMap> map2(MemMap::MapAnonymous("MapAnonymous2",
+                                                    map_base + kPageSize * 2,  // Page 2, adjacent to map1.
+                                                    kPageSize,
+                                                    PROT_READ | PROT_WRITE,
+                                                    false,
+                                                    &error_msg));
+  ASSERT_TRUE(map2.get() != nullptr) << error_msg;
+  ASSERT_TRUE(error_msg.empty());
+
+  // One-map cases: a single map trivially contains no gap.
+  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map0.get()));
+  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map1.get()));
+  ASSERT_TRUE(MemMap::CheckNoGaps(map2.get(), map2.get()));
+
+  // Two- or three-map cases: the maps are contiguous, so no gaps.
+  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map1.get()));
+  ASSERT_TRUE(MemMap::CheckNoGaps(map1.get(), map2.get()));
+  ASSERT_TRUE(MemMap::CheckNoGaps(map0.get(), map2.get()));
+
+  // Unmap the middle one, creating a one-page hole between map0 and map2.
+  map1.reset();
+
+  // Should return false now that there's a gap in the middle.
+  ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
+}
+
 }  // namespace art