ART: Compacting ROS/DlMalloc spaces with semispace copy GC

The current semispace copy GC is mainly associated with bump pointer
spaces. Although it squeezes out fragmentation most aggressively, an extra
copy is required to re-establish the data in a ROS/DlMalloc space so that
CMS GCs can run afterwards. Since semispace copy GC is still stop-the-world,
this extra copy not only introduces unnecessary overhead but also lengthens
the response time, i.e. the interval between the start of the transition
request and the start of the transition animation, which may impact the
user experience.

Using semispace copy GC to compact the data in a ROS space into another ROS
space (or a DlMalloc space into another DlMalloc space) solves this problem.
Although it squeezes out less fragmentation, CMS GCs can run immediately
after the compaction.
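
The core of the compaction pass looks roughly as follows. This is a minimal
sketch distilled from the Heap::PerformHomogeneousSpaceCompact and
Heap::Compact changes in this patch; thread suspension, locking, and error
handling are omitted:

    // Copy all live objects from the current main malloc space into the
    // pre-reserved backup space of the same type, then swap the two roles.
    space::MallocSpace* to_space = main_space_backup_;
    space::MallocSpace* from_space = main_space_;
    to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
    Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
    // Keep the old space readable so ROSAlloc verification can still run.
    from_space->GetMemMap()->Protect(PROT_READ);
    std::swap(main_space_, main_space_backup_);
    // The new main space is an ordinary ROS/DlMalloc space, so CMS GCs can
    // run on it immediately.
    SetSpaceAsDefault(main_space_);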

We apply this algorithm in two cases:
1) Right before throwing an OOM, if -XX:EnableHSpaceCompactForOOM is passed
in as true.
2) When the app is switched to the background, if the -XX:BackgroundGC
option has the value HSpaceCompact (an example invocation follows).
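
For illustration, both triggers could be enabled with runtime options along
these lines (the exact spelling and plumbing of these options on a given
device is an assumption; only the option names and values come from this
change):

    dalvikvm -XX:EnableHSpaceCompactForOOM \
             -XX:BackgroundGC=HSpaceCompact ...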

For case 1), OOMs are significantly delayed in the harmony GC stress test,
with a compaction ratio of up to 0.87. For case 2), a compaction ratio of
around 0.5 is observed in both the built-in SMS app and the browser. Similar
results have been obtained on other apps as well.

Change-Id: Iad9eabc6d046659fda3535ae20f21bc31f89ded3
Signed-off-by: Wang, Zuo <zuo.wang@intel.com>
Signed-off-by: Chang, Yang <yang.chang@intel.com>
Signed-off-by: Lei Li <lei.l.li@intel.com>
Signed-off-by: Lin Zang <lin.zang@intel.com>
diff --git a/runtime/gc/heap.cc b/runtime/gc/heap.cc
index 19715e9..4ec9bc2 100644
--- a/runtime/gc/heap.cc
+++ b/runtime/gc/heap.cc
@@ -93,6 +93,10 @@
 static constexpr size_t kAllocationStackReserveSize = 1024;
 // Default mark stack size in bytes.
 static const size_t kDefaultMarkStackSize = 64 * KB;
+// Define space name.
+static const char* kDlMallocSpaceName[2] = {"main dlmalloc space", "main dlmalloc space 1"};
+static const char* kRosAllocSpaceName[2] = {"main rosalloc space", "main rosalloc space 1"};
+static const char* kMemMapSpaceName[2] = {"main space", "main space 1"};
 
 Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max_free,
            double target_utilization, double foreground_heap_growth_multiplier, size_t capacity,
@@ -103,7 +107,8 @@
            bool ignore_max_footprint, bool use_tlab,
            bool verify_pre_gc_heap, bool verify_pre_sweeping_heap, bool verify_post_gc_heap,
            bool verify_pre_gc_rosalloc, bool verify_pre_sweeping_rosalloc,
-           bool verify_post_gc_rosalloc)
+           bool verify_post_gc_rosalloc, bool use_homogeneous_space_compaction_for_oom,
+           uint64_t min_interval_homogeneous_space_compaction_by_oom)
     : non_moving_space_(nullptr),
       rosalloc_space_(nullptr),
       dlmalloc_space_(nullptr),
@@ -173,7 +178,11 @@
       verify_object_mode_(kVerifyObjectModeDisabled),
       disable_moving_gc_count_(0),
       running_on_valgrind_(Runtime::Current()->RunningOnValgrind()),
-      use_tlab_(use_tlab) {
+      use_tlab_(use_tlab),
+      main_space_backup_(nullptr),
+      min_interval_homogeneous_space_compaction_by_oom_(min_interval_homogeneous_space_compaction_by_oom),
+      last_time_homogeneous_space_compaction_by_oom_(NanoTime()),
+      use_homogeneous_space_compaction_for_oom_(use_homogeneous_space_compaction_for_oom) {
   if (VLOG_IS_ON(heap) || VLOG_IS_ON(startup)) {
     LOG(INFO) << "Heap() entering";
   }
@@ -205,30 +214,90 @@
     CHECK_GT(oat_file_end_addr, image_space->End());
     requested_alloc_space_begin = AlignUp(oat_file_end_addr, kPageSize);
   }
+
+  /*
+  requested_alloc_space_begin ->     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+                                     +-  nonmoving space (kNonMovingSpaceCapacity) +-
+                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+                                     +-        main alloc space (capacity_)        +-
+                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+                                     +-       main alloc space 1 (capacity_)       +-
+                                     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
+  */
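+  // A backup main space is needed whenever homogeneous space compaction may run, either as the
+  // background GC or as the fallback right before throwing an OOM.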
+  bool create_backup_main_space =
+      background_collector_type == gc::kCollectorTypeHomogeneousSpaceCompact ||
+      use_homogeneous_space_compaction_for_oom;
   if (is_zygote) {
     // Reserve the address range before we create the non moving space to make sure bitmaps don't
     // take it.
     std::string error_str;
-    MemMap* mem_map = MemMap::MapAnonymous(
-        "main space", requested_alloc_space_begin + kNonMovingSpaceCapacity, capacity,
+    MemMap* main_space_map = MemMap::MapAnonymous(
+        kMemMapSpaceName[0], requested_alloc_space_begin + kNonMovingSpaceCapacity, capacity_,
         PROT_READ | PROT_WRITE, true, &error_str);
-    CHECK(mem_map != nullptr) << error_str;
+    CHECK(main_space_map != nullptr) << error_str;
+    MemMap* main_space_1_map = nullptr;
+    // Attempt to reserve an extra mem_map for homogeneous space compaction right after the main
+    // space map.
+    if (create_backup_main_space) {
+      main_space_1_map = MemMap::MapAnonymous(kMemMapSpaceName[1], main_space_map->End(), capacity_,
+                                               PROT_READ | PROT_WRITE, true, &error_str);
+      if (main_space_1_map == nullptr) {
+        LOG(WARNING) << "Failed to create map " << kMemMapSpaceName[1] << " with error "
+                     << error_str;
+      }
+    }
     // Non moving space is always dlmalloc since we currently don't have support for multiple
-    // rosalloc spaces.
+    // active rosalloc spaces.
     non_moving_space_ = space::DlMallocSpace::Create(
-        "zygote / non moving space", initial_size, kNonMovingSpaceCapacity, kNonMovingSpaceCapacity,
-        requested_alloc_space_begin, false);
+        "zygote / non moving space", initial_size, kNonMovingSpaceCapacity,
+        kNonMovingSpaceCapacity, requested_alloc_space_begin, false);
     non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
-    CreateMainMallocSpace(mem_map, initial_size, growth_limit, capacity);
+    CreateMainMallocSpace(main_space_map, initial_size, growth_limit_, capacity_);
+    if (main_space_1_map != nullptr) {
+      const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
+      main_space_backup_ = CreateMallocSpaceFromMemMap(main_space_1_map, initial_size,
+                                                       growth_limit_, capacity_, name, true);
+    }
   } else {
     std::string error_str;
-    MemMap* mem_map = MemMap::MapAnonymous("main/non-moving space", requested_alloc_space_begin,
-                                           capacity, PROT_READ | PROT_WRITE, true, &error_str);
-    CHECK(mem_map != nullptr) << error_str;
+    byte* request_begin = requested_alloc_space_begin;
+    if (request_begin == nullptr) {
+      // Disable homogeneous space compaction since we don't have an image.
+      create_backup_main_space = false;
+    }
+    MemMap* main_space_1_map = nullptr;
+    if (create_backup_main_space) {
+      request_begin += kNonMovingSpaceCapacity;
+      // Attempt to reserve an extra mem_map for homogeneous space compaction right after the
+      // main space map.
+      main_space_1_map = MemMap::MapAnonymous(kMemMapSpaceName[1], request_begin + capacity_,
+                                               capacity_, PROT_READ | PROT_WRITE, true, &error_str);
+      if (main_space_1_map == nullptr) {
+        LOG(WARNING) << "Failed to create map " << kMemMapSpaceName[1] << " with error "
+                     << error_str;
+        request_begin = requested_alloc_space_begin;
+      }
+    }
+    MemMap* main_space_map = MemMap::MapAnonymous(kMemMapSpaceName[0], request_begin, capacity_,
+                                                  PROT_READ | PROT_WRITE, true, &error_str);
+    CHECK(main_space_map != nullptr) << error_str;
+    // Introduce a separate non moving space.
+    if (main_space_1_map != nullptr) {
+      // Do this before creating the main malloc space to prevent bitmaps from being placed here.
+      non_moving_space_ = space::DlMallocSpace::Create(
+          "non moving space", kDefaultInitialSize, kNonMovingSpaceCapacity, kNonMovingSpaceCapacity,
+          requested_alloc_space_begin, false);
+      non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
+    }
     // Create the main free list space, which doubles as the non moving space. We can do this since
     // non zygote means that we won't have any background compaction.
-    CreateMainMallocSpace(mem_map, initial_size, growth_limit, capacity);
-    non_moving_space_ = main_space_;
+    CreateMainMallocSpace(main_space_map, initial_size, growth_limit_, capacity_);
+    if (main_space_1_map != nullptr) {
+      const char* name = kUseRosAlloc ? kRosAllocSpaceName[1] : kDlMallocSpaceName[1];
+      main_space_backup_ = CreateMallocSpaceFromMemMap(main_space_1_map, initial_size,
+                                                       growth_limit_, capacity_, name, true);
+      CHECK(main_space_backup_ != nullptr);
+    } else {
+      non_moving_space_ = main_space_;
+    }
   }
   CHECK(non_moving_space_ != nullptr);
 
@@ -240,7 +309,7 @@
       (IsMovingGc(foreground_collector_type_) || IsMovingGc(background_collector_type_))) {
     // TODO: Place bump-pointer spaces somewhere to minimize size of card table.
     // Divide by 2 for a temporary fix for reducing virtual memory usage.
-    const size_t bump_pointer_space_capacity = capacity / 2;
+    const size_t bump_pointer_space_capacity = capacity_ / 2;
     bump_pointer_space_ = space::BumpPointerSpace::Create("Bump pointer space",
                                                           bump_pointer_space_capacity, nullptr);
     CHECK(bump_pointer_space_ != nullptr) << "Failed to create bump pointer space";
@@ -253,13 +322,25 @@
   if (non_moving_space_ != main_space_) {
     AddSpace(non_moving_space_);
   }
+  if (main_space_backup_ != nullptr) {
+    AddSpace(main_space_backup_);
+  } else {
+    const char* disable_msg = "Disabling homogeneous space compact due to no backup main space";
+    if (background_collector_type_ == gc::kCollectorTypeHomogeneousSpaceCompact) {
+      background_collector_type_ = collector_type_;
+      LOG(WARNING) << disable_msg;
+    } else if (use_homogeneous_space_compaction_for_oom_) {
+      LOG(WARNING) << disable_msg;
+    }
+    use_homogeneous_space_compaction_for_oom_ = false;
+  }
   if (main_space_ != nullptr) {
     AddSpace(main_space_);
   }
 
   // Allocate the large object space.
   if (kUseFreeListSpaceForLOS) {
-    large_object_space_ = space::FreeListSpace::Create("large object space", nullptr, capacity);
+    large_object_space_ = space::FreeListSpace::Create("large object space", nullptr, capacity_);
   } else {
     large_object_space_ = space::LargeObjectMapSpace::Create("large object space");
   }
@@ -328,7 +409,7 @@
   }
   if (kMovingCollector) {
     // TODO: Clean this up.
-    bool generational = foreground_collector_type_ == kCollectorTypeGSS;
+    const bool generational = foreground_collector_type_ == kCollectorTypeGSS;
     semi_space_collector_ = new collector::SemiSpace(this, generational,
                                                      generational ? "generational" : "");
     garbage_collectors_.push_back(semi_space_collector_);
@@ -339,9 +420,8 @@
   }
 
   if (GetImageSpace() != nullptr && main_space_ != nullptr) {
-    // Check that there's no gap between the image space and the main
-    // space so that the immune region won't break (eg. due to a large
-    // object allocated in the gap).
+    // Check that there's no gap between the image space and the main space so that the immune
+    // region won't break (eg. due to a large object allocated in the gap).
     bool no_gap = MemMap::CheckNoGaps(GetImageSpace()->GetMemMap(), main_space_->GetMemMap());
     if (!no_gap) {
       MemMap::DumpMaps(LOG(ERROR));
@@ -358,11 +438,36 @@
   }
 }
 
+space::MallocSpace* Heap::CreateMallocSpaceFromMemMap(MemMap* mem_map, size_t initial_size,
+                                                      size_t growth_limit, size_t capacity,
+                                                      const char* name, bool can_move_objects) {
+  space::MallocSpace* malloc_space = nullptr;
+  if (kUseRosAlloc) {
+    // Create rosalloc space.
+    malloc_space = space::RosAllocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
+                                                          initial_size, growth_limit, capacity,
+                                                          low_memory_mode_, can_move_objects);
+  } else {
+    malloc_space = space::DlMallocSpace::CreateFromMemMap(mem_map, name, kDefaultStartingSize,
+                                                          initial_size, growth_limit, capacity,
+                                                          can_move_objects);
+  }
+  if (collector::SemiSpace::kUseRememberedSet) {
+    accounting::RememberedSet* rem_set =
+        new accounting::RememberedSet(std::string(name) + " remembered set", this, malloc_space);
+    CHECK(rem_set != nullptr) << "Failed to create main space remembered set";
+    AddRememberedSet(rem_set);
+  }
+  CHECK(malloc_space != nullptr) << "Failed to create " << name;
+  malloc_space->SetFootprintLimit(malloc_space->Capacity());
+  return malloc_space;
+}
+
 void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t growth_limit,
                                  size_t capacity) {
   // Is background compaction enabled?
   bool can_move_objects = IsMovingGc(background_collector_type_) !=
-      IsMovingGc(foreground_collector_type_);
+      IsMovingGc(foreground_collector_type_) || use_homogeneous_space_compaction_for_oom_;
   // If we are the zygote and don't yet have a zygote space, it means that the zygote fork will
   // happen in the future. If this happens and we have kCompactZygote enabled we wish to compact
   // from the main space to the zygote space. If background compaction is enabled, always pass in
@@ -375,26 +480,10 @@
   if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
     RemoveRememberedSet(main_space_);
   }
-  if (kUseRosAlloc) {
-    rosalloc_space_ = space::RosAllocSpace::CreateFromMemMap(
-        mem_map, "main rosalloc space", kDefaultStartingSize, initial_size, growth_limit, capacity,
-        low_memory_mode_, can_move_objects);
-    main_space_ = rosalloc_space_;
-    CHECK(main_space_ != nullptr) << "Failed to create rosalloc space";
-  } else {
-    dlmalloc_space_ = space::DlMallocSpace::CreateFromMemMap(
-        mem_map, "main dlmalloc space", kDefaultStartingSize, initial_size, growth_limit, capacity,
-        can_move_objects);
-    main_space_ = dlmalloc_space_;
-    CHECK(main_space_ != nullptr) << "Failed to create dlmalloc space";
-  }
-  main_space_->SetFootprintLimit(main_space_->Capacity());
-  if (collector::SemiSpace::kUseRememberedSet) {
-    accounting::RememberedSet* main_space_rem_set =
-        new accounting::RememberedSet("Main space remembered set", this, main_space_);
-    CHECK(main_space_rem_set != nullptr) << "Failed to create main space remembered set";
-    AddRememberedSet(main_space_rem_set);
-  }
+  const char* name = kUseRosAlloc ? kRosAllocSpaceName[0] : kDlMallocSpaceName[0];
+  main_space_ = CreateMallocSpaceFromMemMap(mem_map, initial_size, growth_limit, capacity, name,
+                                            can_move_objects);
+  SetSpaceAsDefault(main_space_);
   VLOG(heap) << "Created main space " << main_space_;
 }
 
@@ -547,8 +636,11 @@
       RequestCollectorTransition(foreground_collector_type_, 0);
     } else {
       // Don't delay for debug builds since we may want to stress test the GC.
-      RequestCollectorTransition(background_collector_type_, kIsDebugBuild ? 0 :
-          kCollectorTransitionWait);
+      // If background_collector_type_ is kCollectorTypeHomogeneousSpaceCompact then we have
+      // special handling which does a homogeneous space compaction once but then doesn't
+      // transition the collector.
+      RequestCollectorTransition(background_collector_type_,
+                                 kIsDebugBuild ? 0 : kCollectorTransitionWait);
     }
   }
 }
@@ -605,7 +697,7 @@
 }
 
 void Heap::AddSpace(space::Space* space) {
-  DCHECK(space != nullptr);
+  CHECK(space != nullptr);
   WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
   if (space->IsContinuousSpace()) {
     DCHECK(!space->IsDiscontinuousSpace());
@@ -811,7 +903,7 @@
   oss << "Failed to allocate a " << byte_count << " byte allocation with " << total_bytes_free
       << " free bytes";
   // If the allocation failed due to fragmentation, print out the largest continuous allocation.
-  if (allocator_type != kAllocatorTypeLOS && total_bytes_free >= byte_count) {
+  if (total_bytes_free >= byte_count) {
     space::MallocSpace* space = nullptr;
     if (allocator_type == kAllocatorTypeNonMoving) {
       space = non_moving_space_;
@@ -844,6 +936,15 @@
     ScopedThreadStateChange tsc(self, kSleeping);
     usleep(wait_time / 1000);  // Usleep takes microseconds.
   }
+  // Launch homogeneous space compaction if it is desired.
+  if (desired_collector_type == kCollectorTypeHomogeneousSpaceCompact) {
+    if (!CareAboutPauseTimes()) {
+      PerformHomogeneousSpaceCompact();
+    }
+    // No need to Trim(). Homogeneous space compaction may free more virtual and physical memory.
+    desired_collector_type = collector_type_;
+    return;
+  }
   // Transition the collector if the desired collector type is not the same as the current
   // collector type.
   TransitionCollector(desired_collector_type);
@@ -1092,6 +1193,17 @@
   }
 }
 
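+// Returns the RosAllocSpace whose underlying RosAlloc instance matches the given allocator, or
+// nullptr if there is none. With a backup main space there may be more than one RosAllocSpace.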
+space::RosAllocSpace* Heap::GetRosAllocSpace(gc::allocator::RosAlloc* rosalloc) const {
+  for (const auto& space : continuous_spaces_) {
+    if (space->AsContinuousSpace()->IsRosAllocSpace()) {
+      if (space->AsContinuousSpace()->AsRosAllocSpace()->GetRosAlloc() == rosalloc) {
+        return space->AsContinuousSpace()->AsRosAllocSpace();
+      }
+    }
+  }
+  return nullptr;
+}
+
 mirror::Object* Heap::AllocateInternalWithGc(Thread* self, AllocatorType allocator,
                                              size_t alloc_size, size_t* bytes_allocated,
                                              size_t* usable_size,
@@ -1173,6 +1285,44 @@
     return nullptr;
   }
   ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
+  if (ptr == nullptr && use_homogeneous_space_compaction_for_oom_) {
+    const uint64_t current_time = NanoTime();
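+    // Only malloc space allocators can benefit from this, and compactions are rate-limited so
+    // repeated allocation failures don't trigger back-to-back stop-the-world pauses.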
+    if ((allocator == kAllocatorTypeRosAlloc || allocator == kAllocatorTypeDlMalloc) &&
+        current_time - last_time_homogeneous_space_compaction_by_oom_ >
+        min_interval_homogeneous_space_compaction_by_oom_) {
+      last_time_homogeneous_space_compaction_by_oom_ = current_time;
+      HomogeneousSpaceCompactResult result = PerformHomogeneousSpaceCompact();
+      switch (result) {
+        case HomogeneousSpaceCompactResult::kSuccess:
+          // If the allocation succeeded, we delayed an oom.
+          ptr = TryToAllocate<true, true>(self, allocator, alloc_size, bytes_allocated, usable_size);
+          if (ptr != nullptr) {
+            count_delayed_oom_++;
+          }
+          break;
+        case HomogeneousSpaceCompactResult::kErrorReject:
+          // Reject due to disabled moving GC.
+          break;
+        case HomogeneousSpaceCompactResult::kErrorVMShuttingDown:
+          // Throw OOM by default.
+          break;
+        default: {
+          LOG(FATAL) << "Unimplemented homogeneous space compaction result " << static_cast<size_t>(result);
+        }
+      }
+      // Log that we ran homogeneous space compaction, since this can cause jank.
+      VLOG(heap) << "Ran heap homogeneous space compaction, "
+                << " requested defragmentation "
+                << count_requested_homogeneous_space_compaction_.LoadSequentiallyConsistent()
+                << " performed defragmentation "
+                << count_performed_homogeneous_space_compaction_.LoadSequentiallyConsistent()
+                << " ignored homogeneous space compaction "
+                << count_ignored_homogeneous_space_compaction_.LoadSequentiallyConsistent()
+                << " delayed count = "
+                << count_delayed_oom_.LoadSequentiallyConsistent();
+    }
+  }
+  // If the allocation hasn't succeeded by this point, throw an OOM error.
   if (ptr == nullptr) {
     ThrowOutOfMemoryError(self, alloc_size, allocator);
   }
@@ -1331,6 +1481,66 @@
   CollectGarbageInternal(gc_plan_.back(), kGcCauseExplicit, clear_soft_references);
 }
 
+HomogeneousSpaceCompactResult Heap::PerformHomogeneousSpaceCompact() {
+  Thread* self = Thread::Current();
+  // Increment the count of requested homogeneous space compactions.
+  count_requested_homogeneous_space_compaction_++;
+  // Store performed homogeneous space compaction at a new request arrival.
+  ThreadList* tl = Runtime::Current()->GetThreadList();
+  ScopedThreadStateChange tsc(self, kWaitingPerformingGc);
+  Locks::mutator_lock_->AssertNotHeld(self);
+  {
+    ScopedThreadStateChange tsc(self, kWaitingForGcToComplete);
+    MutexLock mu(self, *gc_complete_lock_);
+    // Ensure there is only one GC at a time.
+    WaitForGcToCompleteLocked(kGcCauseHomogeneousSpaceCompact, self);
+    // Homogeneous space compaction is a copying transition, so we can't run it if the moving GC
+    // disable count is non-zero.
+    // If the collector type changed to something which doesn't benefit from homogeneous space
+    // compaction, exit.
+    if (disable_moving_gc_count_ != 0 || IsMovingGc(collector_type_)) {
+      return HomogeneousSpaceCompactResult::kErrorReject;
+    }
+    collector_type_running_ = kCollectorTypeHomogeneousSpaceCompact;
+  }
+  if (Runtime::Current()->IsShuttingDown(self)) {
+    // Don't allow heap transitions to happen if the runtime is shutting down since these can
+    // cause objects to get finalized.
+    FinishGC(self, collector::kGcTypeNone);
+    return HomogeneousSpaceCompactResult::kErrorVMShuttingDown;
+  }
+  // Suspend all threads.
+  tl->SuspendAll();
+  uint64_t start_time = NanoTime();
+  // Launch compaction.
+  space::MallocSpace* to_space = main_space_backup_;
+  space::MallocSpace* from_space = main_space_;
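+  // The backup space may still be protected as read-only from a previous compaction; make it
+  // writable before copying into it.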
+  to_space->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
+  const uint64_t space_size_before_compaction = from_space->Size();
+  Compact(to_space, from_space, kGcCauseHomogeneousSpaceCompact);
+  // Protect the from-space as read-only so that we can still run ROSAlloc verification on it.
+  from_space->GetMemMap()->Protect(PROT_READ);
+  const uint64_t space_size_after_compaction = to_space->Size();
+  std::swap(main_space_, main_space_backup_);
+  SetSpaceAsDefault(main_space_);  // Set as default to reset the proper dlmalloc space.
+  // Update performed homogeneous space compaction count.
+  count_performed_homogeneous_space_compaction_++;
+  // Log statistics and resume all threads.
+  uint64_t duration = NanoTime() - start_time;
+  LOG(INFO) << "Heap homogeneous space compaction took " << PrettyDuration(duration) << " size: "
+            << PrettySize(space_size_before_compaction) << " -> "
+            << PrettySize(space_size_after_compaction) << " compact-ratio: "
+            << std::fixed << static_cast<double>(space_size_after_compaction) /
+            static_cast<double>(space_size_before_compaction);
+  tl->ResumeAll();
+  // Finish GC.
+  reference_processor_.EnqueueClearedReferences(self);
+  GrowForUtilization(semi_space_collector_);
+  FinishGC(self, collector::kGcTypeFull);
+  return HomogeneousSpaceCompactResult::kSuccess;
+}
+
 void Heap::TransitionCollector(CollectorType collector_type) {
   if (collector_type == collector_type_) {
     return;
@@ -1385,7 +1595,7 @@
         // We are transitioning from non moving GC -> moving GC, since we copied from the bump
         // pointer space last transition it will be protected.
         bump_pointer_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
-        Compact(bump_pointer_space_, main_space_);
+        Compact(bump_pointer_space_, main_space_, kGcCauseCollectorTransition);
         // Remove the main space so that we don't try to trim it; this doesn't work for debug
         // builds since RosAlloc attempts to read the magic number from a protected page.
         RemoveSpace(main_space_);
@@ -1399,7 +1609,7 @@
         // Compact to the main space from the bump pointer space, don't need to swap semispaces.
         AddSpace(main_space_);
         main_space_->GetMemMap()->Protect(PROT_READ | PROT_WRITE);
-        Compact(main_space_, bump_pointer_space_);
+        Compact(main_space_, bump_pointer_space_, kGcCauseCollectorTransition);
       }
       break;
     }
@@ -1725,14 +1935,15 @@
 }
 
 void Heap::Compact(space::ContinuousMemMapAllocSpace* target_space,
-                   space::ContinuousMemMapAllocSpace* source_space) {
+                   space::ContinuousMemMapAllocSpace* source_space,
+                   GcCause gc_cause) {
   CHECK(kMovingCollector);
   if (target_space != source_space) {
     // Don't swap spaces since this isn't a typical semi space collection.
     semi_space_collector_->SetSwapSemiSpaces(false);
     semi_space_collector_->SetFromSpace(source_space);
     semi_space_collector_->SetToSpace(target_space);
-    semi_space_collector_->Run(kGcCauseCollectorTransition, false);
+    semi_space_collector_->Run(gc_cause, false);
   } else {
     CHECK(target_space->IsBumpPointerSpace())
         << "In-place compaction is only supported for bump pointer spaces";