Improve the region space memory mapping.

Add a region space mmap fallback for when the initially requested
address isn't available.
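
The fallback is conceptually the following minimal POSIX-level sketch
(illustrative only, not the MemMap API; MAP_FIXED_NOREPLACE, which
needs Linux 4.17+ and a correspondingly recent libc, stands in for
MemMap failing when the requested address is unavailable):

    #include <sys/mman.h>
    #include <cstddef>

    // Try to reserve at a preferred address; if that exact range is
    // taken, fall back to a kernel-chosen address.
    void* ReserveWithFallback(void* preferred_begin, size_t capacity) {
      void* addr = mmap(preferred_begin, capacity, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE,
                        -1, 0);
      if (addr == MAP_FAILED) {
        // Preferred range unavailable: retry with no address hint.
        addr = mmap(nullptr, capacity, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      }
      return addr == MAP_FAILED ? nullptr : addr;
    }

In the diff, the same idea appears as the retry loop in
RegionSpace::CreateMemMap, which falls back to requested_begin = nullptr.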

Clean up the asan-specific base address handling by factoring it into
kPreferredAllocSpaceBegin.

Add MemMap::AlignBy to align the region space base address by the
region size, which is currently required by ReadBarrierTable.
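
The alignment is achieved by over-reserving one extra region and then
unmapping the unaligned ends, roughly as in this illustrative sketch
(assumes alignment is a power of two and both capacity and alignment
are multiples of the page size; the real code goes through MemMap so
its internal bookkeeping stays consistent):

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>

    // Reserve 'capacity' bytes whose begin and end are both aligned
    // to 'alignment'.
    uint8_t* ReserveAligned(size_t capacity, size_t alignment) {
      // Ask for one extra alignment unit so an aligned sub-range
      // always fits inside the reservation.
      void* raw = mmap(nullptr, capacity + alignment,
                       PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (raw == MAP_FAILED) {
        return nullptr;
      }
      uintptr_t begin = reinterpret_cast<uintptr_t>(raw);
      uintptr_t end = begin + capacity + alignment;
      uintptr_t mask = static_cast<uintptr_t>(alignment) - 1;
      uintptr_t aligned_begin = (begin + mask) & ~mask;
      uintptr_t aligned_end = aligned_begin + capacity;
      // Unmap the unaligned head and the leftover tail.
      if (aligned_begin > begin) {
        munmap(reinterpret_cast<void*>(begin), aligned_begin - begin);
      }
      if (aligned_end < end) {
        munmap(reinterpret_cast<void*>(aligned_end), end - aligned_end);
      }
      return reinterpret_cast<uint8_t*>(aligned_begin);
    }

The diff below implements this via RegionSpace::CreateMemMap (which
over-reserves by kRegionSize) plus the new MemMap::AlignBy.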

Disable some unnecessary read barriers in ZygoteCompactingCollector
(on the SizeOf/VisitReferences calls) to avoid a DCHECK failure in
LockWord::SetMarkBitState when classes are in the forwarded state.

Bug: 12687968
Test: test-art-host with CC and CMS.
Test: marlin-userdebug_asan_coverage boot.
Test: angler boots with CC and CMS.

Change-Id: I70f99779df6acc1b64cab6402f3ef7c73ce5b39b
diff --git a/runtime/base/bit_utils.h b/runtime/base/bit_utils.h
index f0811b0..4041f5e 100644
--- a/runtime/base/bit_utils.h
+++ b/runtime/base/bit_utils.h
@@ -152,6 +152,11 @@
   return (x & (n - 1)) == 0;
 }
 
+template<typename T>
+inline bool IsAlignedParam(T* x, int n) {
+  return IsAlignedParam(reinterpret_cast<const uintptr_t>(x), n);
+}
+
 #define CHECK_ALIGNED(value, alignment) \
   CHECK(::art::IsAligned<alignment>(value)) << reinterpret_cast<const void*>(value)
 
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
index cb9e7e2..f79a867 100644
--- a/runtime/gc/collector/semi_space.cc
+++ b/runtime/gc/collector/semi_space.cc
@@ -723,7 +723,9 @@
 void SemiSpace::ScanObject(Object* obj) {
   DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
   MarkObjectVisitor visitor(this);
-  obj->VisitReferences(visitor, visitor);
+  // Turn off read barriers. ZygoteCompactingCollector doesn't use them (even in the CC build).
+  obj->VisitReferences</*kVisitNativeRoots*/true, kDefaultVerifyFlags, kWithoutReadBarrier>(
+      visitor, visitor);
 }
 
 // Scan anything that's on the mark stack.
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 34afa2a..1cbd503 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -133,6 +133,17 @@
 // config.
 static constexpr double kExtraHeapGrowthMultiplier = kUseReadBarrier ? 1.0 : 0.0;
 
+static const char* kRegionSpaceName = "main space (region space)";
+
+#if defined(__LP64__) || !defined(ADDRESS_SANITIZER)
+// 300 MB (0x12c00000) - (default non-moving space capacity).
+static uint8_t* const kPreferredAllocSpaceBegin =
+    reinterpret_cast<uint8_t*>(300 * MB - Heap::kDefaultNonMovingSpaceCapacity);
+#else
+// For 32-bit, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
+static uint8_t* const kPreferredAllocSpaceBegin = reinterpret_cast<uint8_t*>(0x20000000);
+#endif
+
 static inline bool CareAboutPauseTimes() {
   return Runtime::Current()->InJankPerceptibleProcessState();
 }
@@ -286,15 +297,9 @@
   // Requested begin for the alloc space, to follow the mapped image and oat files
   uint8_t* requested_alloc_space_begin = nullptr;
   if (foreground_collector_type_ == kCollectorTypeCC) {
-    // Need to use a low address so that we can allocate a contiguous
-    // 2 * Xmx space when there's no image (dex2oat for target).
-#if defined(__LP64__) || !defined(ADDRESS_SANITIZER)
-    CHECK_GE(300 * MB, non_moving_space_capacity);
-    requested_alloc_space_begin = reinterpret_cast<uint8_t*>(300 * MB) - non_moving_space_capacity;
-#else
-    // For 32-bit, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
-    requested_alloc_space_begin = reinterpret_cast<uint8_t*>(0x20000000);
-#endif
+    // Need to use a low address so that we can allocate a contiguous 2 * Xmx space when there's no
+    // image (dex2oat for target).
+    requested_alloc_space_begin = kPreferredAllocSpaceBegin;
   }
 
   // Load image space(s).
@@ -369,12 +374,7 @@
                              &error_str));
     CHECK(non_moving_space_mem_map != nullptr) << error_str;
     // Try to reserve virtual memory at a lower address if we have a separate non moving space.
-#if defined(__LP64__) || !defined(ADDRESS_SANITIZER)
-    request_begin = reinterpret_cast<uint8_t*>(300 * MB);
-#else
-    // For 32-bit, use 0x20000000 because asan reserves 0x04000000 - 0x20000000.
-    request_begin = reinterpret_cast<uint8_t*>(0x20000000) + non_moving_space_capacity;
-#endif
+    request_begin = kPreferredAllocSpaceBegin + non_moving_space_capacity;
   }
   // Attempt to create 2 mem maps at or after the requested begin.
   if (foreground_collector_type_ != kCollectorTypeCC) {
@@ -419,7 +419,12 @@
   }
   // Create other spaces based on whether or not we have a moving GC.
   if (foreground_collector_type_ == kCollectorTypeCC) {
-    region_space_ = space::RegionSpace::Create("main space (region space)", capacity_ * 2, request_begin);
+    CHECK(separate_non_moving_space);
+    MemMap* region_space_mem_map = space::RegionSpace::CreateMemMap(kRegionSpaceName,
+                                                                    capacity_ * 2,
+                                                                    request_begin);
+    CHECK(region_space_mem_map != nullptr) << "No region space mem map";
+    region_space_ = space::RegionSpace::Create(kRegionSpaceName, region_space_mem_map);
     AddSpace(region_space_);
   } else if (IsMovingGc(foreground_collector_type_) &&
       foreground_collector_type_ != kCollectorTypeGSS) {
@@ -2327,7 +2332,9 @@
     size_t bin_size = object_addr - context->prev_;
     // Add the bin consisting of the end of the previous object to the start of the current object.
     collector->AddBin(bin_size, context->prev_);
-    context->prev_ = object_addr + RoundUp(obj->SizeOf(), kObjectAlignment);
+    // Turn off read barriers. ZygoteCompactingCollector doesn't use them (even in the CC build).
+    context->prev_ = object_addr + RoundUp(obj->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>(),
+                                           kObjectAlignment);
   }
 
   void AddBin(size_t size, uintptr_t position) {
@@ -2347,7 +2354,8 @@
 
   virtual mirror::Object* MarkNonForwardedObject(mirror::Object* obj)
       REQUIRES(Locks::heap_bitmap_lock_, Locks::mutator_lock_) {
-    size_t obj_size = obj->SizeOf();
+    // Turn off read barriers. ZygoteCompactingCollector doesn't use them (even in the CC build).
+    size_t obj_size = obj->SizeOf<kDefaultVerifyFlags, kWithoutReadBarrier>();
     size_t alloc_size = RoundUp(obj_size, kObjectAlignment);
     mirror::Object* forward_address;
     // Find the smallest bin which we can move obj in.
diff --git a/runtime/gc/space/region_space.cc b/runtime/gc/space/region_space.cc
index 8077319..321524c 100644
--- a/runtime/gc/space/region_space.cc
+++ b/runtime/gc/space/region_space.cc
@@ -28,20 +28,52 @@
 // value of the region size, evaculate the region.
 static constexpr uint kEvaculateLivePercentThreshold = 75U;
 
-RegionSpace* RegionSpace::Create(const std::string& name, size_t capacity,
-                                 uint8_t* requested_begin) {
-  capacity = RoundUp(capacity, kRegionSize);
+MemMap* RegionSpace::CreateMemMap(const std::string& name, size_t capacity,
+                                  uint8_t* requested_begin) {
+  CHECK_ALIGNED(capacity, kRegionSize);
   std::string error_msg;
-  std::unique_ptr<MemMap> mem_map(MemMap::MapAnonymous(name.c_str(), requested_begin, capacity,
-                                                       PROT_READ | PROT_WRITE, true, false,
-                                                       &error_msg));
+  // Request an extra kRegionSize of capacity so that we can align the map by kRegionSize even if
+  // the base address we get is unaligned. This is necessary for the ReadBarrierTable to work.
+  std::unique_ptr<MemMap> mem_map;
+  while (true) {
+    mem_map.reset(MemMap::MapAnonymous(name.c_str(),
+                                       requested_begin,
+                                       capacity + kRegionSize,
+                                       PROT_READ | PROT_WRITE,
+                                       true,
+                                       false,
+                                       &error_msg));
+    if (mem_map.get() != nullptr || requested_begin == nullptr) {
+      break;
+    }
+    // Retry with no specified request begin.
+    requested_begin = nullptr;
+  }
   if (mem_map.get() == nullptr) {
     LOG(ERROR) << "Failed to allocate pages for alloc space (" << name << ") of size "
         << PrettySize(capacity) << " with message " << error_msg;
     MemMap::DumpMaps(LOG_STREAM(ERROR));
     return nullptr;
   }
-  return new RegionSpace(name, mem_map.release());
+  CHECK_EQ(mem_map->Size(), capacity + kRegionSize);
+  CHECK_EQ(mem_map->Begin(), mem_map->BaseBegin());
+  CHECK_EQ(mem_map->Size(), mem_map->BaseSize());
+  if (IsAlignedParam(mem_map->Begin(), kRegionSize)) {
+    // Got an aligned map. Since we requested a map that's kRegionSize larger, shrink it by
+    // kRegionSize at the end.
+    mem_map->SetSize(capacity);
+  } else {
+    // Got an unaligned map. Align both ends.
+    mem_map->AlignBy(kRegionSize);
+  }
+  CHECK_ALIGNED(mem_map->Begin(), kRegionSize);
+  CHECK_ALIGNED(mem_map->End(), kRegionSize);
+  CHECK_EQ(mem_map->Size(), capacity);
+  return mem_map.release();
+}
+
+RegionSpace* RegionSpace::Create(const std::string& name, MemMap* mem_map) {
+  return new RegionSpace(name, mem_map);
 }
 
 RegionSpace::RegionSpace(const std::string& name, MemMap* mem_map)
diff --git a/runtime/gc/space/region_space.h b/runtime/gc/space/region_space.h
index f3b9595..f12373e 100644
--- a/runtime/gc/space/region_space.h
+++ b/runtime/gc/space/region_space.h
@@ -35,10 +35,11 @@
     return kSpaceTypeRegionSpace;
   }
 
-  // Create a region space with the requested sizes. The requested base address is not
-  // guaranteed to be granted, if it is required, the caller should call Begin on the returned
-  // space to confirm the request was granted.
-  static RegionSpace* Create(const std::string& name, size_t capacity, uint8_t* requested_begin);
+  // Create a region space mem map with the requested sizes. The requested base address is not
+  // guaranteed to be granted; if it is required, the caller should call Begin on the returned
+  // mem map to confirm the request was granted.
+  static MemMap* CreateMemMap(const std::string& name, size_t capacity, uint8_t* requested_begin);
+  static RegionSpace* Create(const std::string& name, MemMap* mem_map);
 
   // Allocate num_bytes, returns null if the space is full.
   mirror::Object* Alloc(Thread* self, size_t num_bytes, size_t* bytes_allocated,
diff --git a/runtime/mem_map.cc b/runtime/mem_map.cc
index 93c212b..40309b9 100644
--- a/runtime/mem_map.cc
+++ b/runtime/mem_map.cc
@@ -962,4 +962,52 @@
   }
 }
 
+void MemMap::AlignBy(size_t size) {
+  CHECK_EQ(begin_, base_begin_) << "Unsupported";
+  CHECK_EQ(size_, base_size_) << "Unsupported";
+  CHECK_GT(size, static_cast<size_t>(kPageSize));
+  CHECK_ALIGNED(size, kPageSize);
+  if (IsAlignedParam(reinterpret_cast<uintptr_t>(base_begin_), size) &&
+      IsAlignedParam(base_size_, size)) {
+    // Already aligned.
+    return;
+  }
+  uint8_t* base_begin = reinterpret_cast<uint8_t*>(base_begin_);
+  uint8_t* base_end = base_begin + base_size_;
+  uint8_t* aligned_base_begin = AlignUp(base_begin, size);
+  uint8_t* aligned_base_end = AlignDown(base_end, size);
+  CHECK_LE(base_begin, aligned_base_begin);
+  CHECK_LE(aligned_base_end, base_end);
+  size_t aligned_base_size = aligned_base_end - aligned_base_begin;
+  CHECK_LT(aligned_base_begin, aligned_base_end)
+      << "base_begin = " << reinterpret_cast<void*>(base_begin)
+      << " base_end = " << reinterpret_cast<void*>(base_end);
+  CHECK_GE(aligned_base_size, size);
+  // Unmap the unaligned parts.
+  if (base_begin < aligned_base_begin) {
+    MEMORY_TOOL_MAKE_UNDEFINED(base_begin, aligned_base_begin - base_begin);
+    CHECK_EQ(munmap(base_begin, aligned_base_begin - base_begin), 0)
+        << "base_begin=" << reinterpret_cast<void*>(base_begin)
+        << " aligned_base_begin=" << reinterpret_cast<void*>(aligned_base_begin);
+  }
+  if (aligned_base_end < base_end) {
+    MEMORY_TOOL_MAKE_UNDEFINED(aligned_base_end, base_end - aligned_base_end);
+    CHECK_EQ(munmap(aligned_base_end, base_end - aligned_base_end), 0)
+        << "base_end=" << reinterpret_cast<void*>(base_end)
+        << " aligned_base_end=" << reinterpret_cast<void*>(aligned_base_end);
+  }
+  std::lock_guard<std::mutex> mu(*mem_maps_lock_);
+  base_begin_ = aligned_base_begin;
+  base_size_ = aligned_base_size;
+  begin_ = aligned_base_begin;
+  size_ = aligned_base_size;
+  DCHECK(maps_ != nullptr);
+  if (base_begin < aligned_base_begin) {
+    auto it = maps_->find(base_begin);
+    CHECK(it != maps_->end()) << "MemMap not found";
+    maps_->erase(it);
+    maps_->insert(std::make_pair(base_begin_, this));
+  }
+}
+
 }  // namespace art
diff --git a/runtime/mem_map.h b/runtime/mem_map.h
index 71db3f7..ceb4c33 100644
--- a/runtime/mem_map.h
+++ b/runtime/mem_map.h
@@ -193,6 +193,9 @@
   // intermittently.
   void TryReadable();
 
+  // Align the map by unmapping the unaligned parts at the lower and the higher ends.
+  void AlignBy(size_t size);
+
  private:
   MemMap(const std::string& name,
          uint8_t* begin,
@@ -222,10 +225,10 @@
                            bool low_4gb);
 
   const std::string name_;
-  uint8_t* const begin_;  // Start of data.
+  uint8_t* begin_;  // Start of data. May be changed by AlignBy.
   size_t size_;  // Length of data.
 
-  void* const base_begin_;  // Page-aligned base address.
+  void* base_begin_;  // Page-aligned base address. May be changed by AlignBy.
   size_t base_size_;  // Length of mapping. May be changed by RemapAtEnd (ie Zygote).
   int prot_;  // Protection of the map.
 
diff --git a/runtime/mem_map_test.cc b/runtime/mem_map_test.cc
index e703b78..aa306ac 100644
--- a/runtime/mem_map_test.cc
+++ b/runtime/mem_map_test.cc
@@ -431,4 +431,108 @@
   ASSERT_FALSE(MemMap::CheckNoGaps(map0.get(), map2.get()));
 }
 
+TEST_F(MemMapTest, AlignBy) {
+  CommonInit();
+  std::string error_msg;
+  // Cast the page size to size_t.
+  const size_t page_size = static_cast<size_t>(kPageSize);
+  // Map a region.
+  std::unique_ptr<MemMap> m0(MemMap::MapAnonymous("MemMapTest_AlignByTest_map0",
+                                                  nullptr,
+                                                  14 * page_size,
+                                                  PROT_READ | PROT_WRITE,
+                                                  false,
+                                                  false,
+                                                  &error_msg));
+  uint8_t* base0 = m0->Begin();
+  ASSERT_TRUE(base0 != nullptr) << error_msg;
+  ASSERT_EQ(m0->Size(), 14 * page_size);
+  ASSERT_EQ(BaseBegin(m0.get()), base0);
+  ASSERT_EQ(BaseSize(m0.get()), m0->Size());
+
+  // Break it into several regions by using RemapAtEnd.
+  std::unique_ptr<MemMap> m1(m0->RemapAtEnd(base0 + 3 * page_size,
+                                            "MemMapTest_AlignByTest_map1",
+                                            PROT_READ | PROT_WRITE,
+                                            &error_msg));
+  uint8_t* base1 = m1->Begin();
+  ASSERT_TRUE(base1 != nullptr) << error_msg;
+  ASSERT_EQ(base1, base0 + 3 * page_size);
+  ASSERT_EQ(m0->Size(), 3 * page_size);
+
+  std::unique_ptr<MemMap> m2(m1->RemapAtEnd(base1 + 4 * page_size,
+                                            "MemMapTest_AlignByTest_map2",
+                                            PROT_READ | PROT_WRITE,
+                                            &error_msg));
+  uint8_t* base2 = m2->Begin();
+  ASSERT_TRUE(base2 != nullptr) << error_msg;
+  ASSERT_EQ(base2, base1 + 4 * page_size);
+  ASSERT_EQ(m1->Size(), 4 * page_size);
+
+  std::unique_ptr<MemMap> m3(m2->RemapAtEnd(base2 + 3 * page_size,
+                                            "MemMapTest_AlignByTest_map3",
+                                            PROT_READ | PROT_WRITE,
+                                            &error_msg));
+  uint8_t* base3 = m3->Begin();
+  ASSERT_TRUE(base3 != nullptr) << error_msg;
+  ASSERT_EQ(base3, base2 + 3 * page_size);
+  ASSERT_EQ(m2->Size(), 3 * page_size);
+  ASSERT_EQ(m3->Size(), 4 * page_size);
+
+  uint8_t* end0 = base0 + m0->Size();
+  uint8_t* end1 = base1 + m1->Size();
+  uint8_t* end2 = base2 + m2->Size();
+  uint8_t* end3 = base3 + m3->Size();
+
+  ASSERT_EQ(static_cast<size_t>(end3 - base0), 14 * page_size);
+
+  if (IsAlignedParam(base0, 2 * page_size)) {
+    ASSERT_FALSE(IsAlignedParam(base1, 2 * page_size));
+    ASSERT_FALSE(IsAlignedParam(base2, 2 * page_size));
+    ASSERT_TRUE(IsAlignedParam(base3, 2 * page_size));
+    ASSERT_TRUE(IsAlignedParam(end3, 2 * page_size));
+  } else {
+    ASSERT_TRUE(IsAlignedParam(base1, 2 * page_size));
+    ASSERT_TRUE(IsAlignedParam(base2, 2 * page_size));
+    ASSERT_FALSE(IsAlignedParam(base3, 2 * page_size));
+    ASSERT_FALSE(IsAlignedParam(end3, 2 * page_size));
+  }
+
+  // Align by 2 * page_size.
+  m0->AlignBy(2 * page_size);
+  m1->AlignBy(2 * page_size);
+  m2->AlignBy(2 * page_size);
+  m3->AlignBy(2 * page_size);
+
+  EXPECT_TRUE(IsAlignedParam(m0->Begin(), 2 * page_size));
+  EXPECT_TRUE(IsAlignedParam(m1->Begin(), 2 * page_size));
+  EXPECT_TRUE(IsAlignedParam(m2->Begin(), 2 * page_size));
+  EXPECT_TRUE(IsAlignedParam(m3->Begin(), 2 * page_size));
+
+  EXPECT_TRUE(IsAlignedParam(m0->Begin() + m0->Size(), 2 * page_size));
+  EXPECT_TRUE(IsAlignedParam(m1->Begin() + m1->Size(), 2 * page_size));
+  EXPECT_TRUE(IsAlignedParam(m2->Begin() + m2->Size(), 2 * page_size));
+  EXPECT_TRUE(IsAlignedParam(m3->Begin() + m3->Size(), 2 * page_size));
+
+  if (IsAlignedParam(base0, 2 * page_size)) {
+    EXPECT_EQ(m0->Begin(), base0);
+    EXPECT_EQ(m0->Begin() + m0->Size(), end0 - page_size);
+    EXPECT_EQ(m1->Begin(), base1 + page_size);
+    EXPECT_EQ(m1->Begin() + m1->Size(), end1 - page_size);
+    EXPECT_EQ(m2->Begin(), base2 + page_size);
+    EXPECT_EQ(m2->Begin() + m2->Size(), end2);
+    EXPECT_EQ(m3->Begin(), base3);
+    EXPECT_EQ(m3->Begin() + m3->Size(), end3);
+  } else {
+    EXPECT_EQ(m0->Begin(), base0 + page_size);
+    EXPECT_EQ(m0->Begin() + m0->Size(), end0);
+    EXPECT_EQ(m1->Begin(), base1);
+    EXPECT_EQ(m1->Begin() + m1->Size(), end1);
+    EXPECT_EQ(m2->Begin(), base2);
+    EXPECT_EQ(m2->Begin() + m2->Size(), end2 - page_size);
+    EXPECT_EQ(m3->Begin(), base3 + page_size);
+    EXPECT_EQ(m3->Begin() + m3->Size(), end3 - page_size);
+  }
+}
+
 }  // namespace art