Compacting collector.

The compacting collector is currently similar to semispace. It works by
copying objects back and forth between two bump pointer spaces. Some
types of objects are "non-movable" due to current runtime limitations:
Classes, Methods, and Fields.

Bump pointer spaces are a new type of continuous alloc space with no
lock in the allocation code path: allocation advances the allocation
pointer using atomic operations. Traversing the objects in a bump
pointer space relies on Object::SizeOf matching the allocated size
exactly.
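
A minimal sketch of the lock-free allocation path (illustrative only;
the member names end_, growth_end_ and kAlignment are assumptions, not
the exact ART implementation):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    class BumpPointerSketch {
     public:
      // Bump-allocate num_bytes with a CAS loop; no lock on the fast path.
      void* Alloc(std::size_t num_bytes) {
        static constexpr std::size_t kAlignment = 8;
        num_bytes = (num_bytes + kAlignment - 1) & ~(kAlignment - 1);
        uint8_t* old_end = end_.load(std::memory_order_relaxed);
        do {
          if (old_end + num_bytes > growth_end_) {
            return nullptr;  // Out of space; a real caller takes a slow path.
          }
          // On failure, compare_exchange_weak reloads old_end and we retry.
        } while (!end_.compare_exchange_weak(old_end, old_end + num_bytes));
        return old_end;
      }
     private:
      std::atomic<uint8_t*> end_;  // Current allocation position.
      uint8_t* growth_end_;        // End of the space.
    };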

Runtime changes:
JNI::GetArrayElements returns a copy if you attempt to get the backing
data of a movable array. For GetPrimitiveArrayCritical, we return
direct backing storage for any type of array, but temporarily disable
the GC until the critical region is completed.
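
The copy-vs-direct decision can be pictured as follows (a hedged sketch
with toy types; not the real JNI implementation):

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>

    struct ToyIntArray { int* data; std::size_t length; bool movable; };

    // Movable arrays get a malloc'd copy so compaction cannot invalidate
    // the pointer handed to native code; non-movable arrays return their
    // backing storage directly.
    int* GetArrayElementsSketch(ToyIntArray* array, bool* is_copy) {
      if (array->movable) {
        int* copy = static_cast<int*>(std::malloc(array->length * sizeof(int)));
        std::memcpy(copy, array->data, array->length * sizeof(int));
        if (is_copy != nullptr) *is_copy = true;
        return copy;
      }
      if (is_copy != nullptr) *is_copy = false;
      return array->data;
    }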

Added a new runtime call, VisitObjects, which is used in place of the
old pattern of flushing the allocation stack and walking the bitmaps.
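
A hedged usage sketch (the exact callback typedef lives in the runtime
headers; the signature below is an assumption):

    // Assumed signature: Heap::VisitObjects invokes the callback once per
    // live object, replacing flush-the-alloc-stack-and-walk-the-bitmaps.
    static void CountObjectCallback(mirror::Object* obj, void* arg) {
      ++*reinterpret_cast<size_t*>(arg);
    }

    size_t live_objects = 0;
    Runtime::Current()->GetHeap()->VisitObjects(CountObjectCallback, &live_objects);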

Changed the image writer to be compaction safe and to use the object
monitor word for forwarding addresses.
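
The forwarding scheme stashes the to-space address in the object's lock
word; a minimal sketch of the round trip, mirroring the MarkObject code
in this change (the load side mirrors GetForwardingAddressInFromSpace):

    // Store: only after memcpy'ing the object, so the copy keeps the
    // original monitor word.
    obj->SetLockWord(LockWord::FromForwardingAddress(
        reinterpret_cast<size_t>(forward_address)));
    // Load: decode the to-space address back out of the lock word.
    mirror::Object* dest = reinterpret_cast<mirror::Object*>(
        obj->GetLockWord().ForwardingAddress());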

Added a bunch of SIRTs (stack indirect reference tables) to ClassLinker, MethodLinker, etc.

TODO: Enable switching allocators, compacting in the background, etc.

Bug: 8981901

Change-Id: I3c886fd322a6eef2b99388d19a765042ec26ab99
diff --git a/runtime/gc/collector/semi_space.cc b/runtime/gc/collector/semi_space.cc
new file mode 100644
index 0000000..d833631
--- /dev/null
+++ b/runtime/gc/collector/semi_space.cc
@@ -0,0 +1,799 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "semi_space.h"
+
+#include <climits>
+#include <functional>
+#include <numeric>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/mutex-inl.h"
+#include "base/timing_logger.h"
+#include "gc/accounting/heap_bitmap.h"
+#include "gc/accounting/mod_union_table.h"
+#include "gc/accounting/space_bitmap-inl.h"
+#include "gc/heap.h"
+#include "gc/space/bump_pointer_space.h"
+#include "gc/space/bump_pointer_space-inl.h"
+#include "gc/space/image_space.h"
+#include "gc/space/large_object_space.h"
+#include "gc/space/space-inl.h"
+#include "indirect_reference_table.h"
+#include "intern_table.h"
+#include "jni_internal.h"
+#include "mark_sweep-inl.h"
+#include "monitor.h"
+#include "mirror/art_field.h"
+#include "mirror/art_field-inl.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "mirror/dex_cache.h"
+#include "mirror/object-inl.h"
+#include "mirror/object_array.h"
+#include "mirror/object_array-inl.h"
+#include "runtime.h"
+#include "semi_space-inl.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+#include "verifier/method_verifier.h"
+
+using ::art::mirror::Class;
+using ::art::mirror::Object;
+
+namespace art {
+namespace gc {
+namespace collector {
+
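+// When true, mprotect the from-space after a collection so that stale references
+// into it fault immediately (see the end of ReclaimPhase).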
+static constexpr bool kProtectFromSpace = true;
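+// When true, release the memory used by the from-space at the end of a collection.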
+static constexpr bool kResetFromSpace = true;
+
+// TODO: Unduplicate logic.
+void SemiSpace::ImmuneSpace(space::ContinuousSpace* space) {
+  // Bind live to mark bitmap if necessary.
+  if (space->GetLiveBitmap() != space->GetMarkBitmap()) {
+    BindLiveToMarkBitmap(space);
+  }
+  // Add the space to the immune region.
+  if (immune_begin_ == nullptr) {
+    DCHECK(immune_end_ == nullptr);
+    immune_begin_ = reinterpret_cast<Object*>(space->Begin());
+    immune_end_ = reinterpret_cast<Object*>(space->End());
+  } else {
+    const space::ContinuousSpace* prev_space = nullptr;
+    // Find out if the previous space is immune.
+    for (space::ContinuousSpace* cur_space : GetHeap()->GetContinuousSpaces()) {
+      if (cur_space == space) {
+        break;
+      }
+      prev_space = cur_space;
+    }
+    // If previous space was immune, then extend the immune region. Relies on continuous spaces
+    // being sorted by Heap::AddContinuousSpace.
+    if (prev_space != nullptr && IsImmuneSpace(prev_space)) {
+      immune_begin_ = std::min(reinterpret_cast<Object*>(space->Begin()), immune_begin_);
+      immune_end_ = std::max(reinterpret_cast<Object*>(space->End()), immune_end_);
+    }
+  }
+}
+
+void SemiSpace::BindBitmaps() {
+  timings_.StartSplit("BindBitmaps");
+  WriterMutexLock mu(Thread::Current(), *Locks::heap_bitmap_lock_);
+  // Mark all of the spaces we never collect as immune.
+  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+    if (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyNeverCollect
+        || space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect) {
+      ImmuneSpace(space);
+    }
+  }
+  timings_.EndSplit();
+}
+
+SemiSpace::SemiSpace(Heap* heap, const std::string& name_prefix)
+    : GarbageCollector(heap,
+                       name_prefix + (name_prefix.empty() ? "" : " ") + "marksweep + semispace"),
+      mark_stack_(nullptr),
+      immune_begin_(nullptr),
+      immune_end_(nullptr),
+      to_space_(nullptr),
+      from_space_(nullptr),
+      soft_reference_list_(nullptr),
+      weak_reference_list_(nullptr),
+      finalizer_reference_list_(nullptr),
+      phantom_reference_list_(nullptr),
+      cleared_reference_list_(nullptr),
+      self_(nullptr) {
+}
+
+void SemiSpace::InitializePhase() {
+  timings_.Reset();
+  base::TimingLogger::ScopedSplit split("InitializePhase", &timings_);
+  mark_stack_ = heap_->mark_stack_.get();
+  DCHECK(mark_stack_ != nullptr);
+  immune_begin_ = nullptr;
+  immune_end_ = nullptr;
+  soft_reference_list_ = nullptr;
+  weak_reference_list_ = nullptr;
+  finalizer_reference_list_ = nullptr;
+  phantom_reference_list_ = nullptr;
+  cleared_reference_list_ = nullptr;
+  self_ = Thread::Current();
+  // Do any pre GC verification.
+  timings_.NewSplit("PreGcVerification");
+  heap_->PreGcVerification(this);
+}
+
+void SemiSpace::ProcessReferences(Thread* self) {
+  base::TimingLogger::ScopedSplit split("ProcessReferences", &timings_);
+  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+  ProcessReferences(&soft_reference_list_, clear_soft_references_, &weak_reference_list_,
+                    &finalizer_reference_list_, &phantom_reference_list_);
+}
+
+void SemiSpace::MarkingPhase() {
+  Thread* self = Thread::Current();
+  Locks::mutator_lock_->AssertExclusiveHeld(self);
+  base::TimingLogger::ScopedSplit split("MarkingPhase", &timings_);
+  // Need to do this with mutators paused so that somebody doesn't accidentally allocate into the
+  // wrong space.
+  heap_->SwapSemiSpaces();
+  // Assume the cleared space is already empty.
+  BindBitmaps();
+  // Process dirty cards and add dirty cards to mod-union tables.
+  heap_->ProcessCards(timings_);
+  // Need to do this before the checkpoint since we don't want any threads to add references to
+  // the live stack during the recursive mark.
+  timings_.NewSplit("SwapStacks");
+  heap_->SwapStacks();
+  WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+  MarkRoots();
+  // Mark roots of immune spaces.
+  UpdateAndMarkModUnion();
+  // Recursively mark remaining objects.
+  MarkReachableObjects();
+}
+
+bool SemiSpace::IsImmuneSpace(const space::ContinuousSpace* space) const {
+  return immune_begin_ <= reinterpret_cast<Object*>(space->Begin()) &&
+         immune_end_ >= reinterpret_cast<Object*>(space->End());
+}
+
+void SemiSpace::UpdateAndMarkModUnion() {
+  for (auto& space : heap_->GetContinuousSpaces()) {
+    // If the space is immune then we need to mark the references to other spaces.
+    if (IsImmuneSpace(space)) {
+      accounting::ModUnionTable* table = heap_->FindModUnionTableFromSpace(space);
+      CHECK(table != nullptr);
+      // TODO: Improve naming.
+      base::TimingLogger::ScopedSplit split(
+          space->IsZygoteSpace() ? "UpdateAndMarkZygoteModUnionTable" :
+                                   "UpdateAndMarkImageModUnionTable",
+                                   &timings_);
+      table->UpdateAndMarkReferences(MarkRootCallback, this);
+    }
+  }
+}
+
+void SemiSpace::MarkReachableObjects() {
+  timings_.StartSplit("MarkStackAsLive");
+  accounting::ObjectStack* live_stack = heap_->GetLiveStack();
+  heap_->MarkAllocStackAsLive(live_stack);
+  live_stack->Reset();
+  timings_.EndSplit();
+  // Recursively process the mark stack.
+  ProcessMarkStack(true);
+}
+
+void SemiSpace::ReclaimPhase() {
+  base::TimingLogger::ScopedSplit split("ReclaimPhase", &timings_);
+  Thread* self = Thread::Current();
+  ProcessReferences(self);
+  {
+    ReaderMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    SweepSystemWeaks();
+  }
+  // Record freed memory.
+  int from_bytes = from_space_->GetBytesAllocated();
+  int to_bytes = to_space_->GetBytesAllocated();
+  int from_objects = from_space_->GetObjectsAllocated();
+  int to_objects = to_space_->GetObjectsAllocated();
+  int freed_bytes = from_bytes - to_bytes;
+  int freed_objects = from_objects - to_objects;
+  CHECK_GE(freed_bytes, 0);
+  freed_bytes_.fetch_add(freed_bytes);
+  freed_objects_.fetch_add(freed_objects);
+  heap_->RecordFree(static_cast<size_t>(freed_objects), static_cast<size_t>(freed_bytes));
+
+  timings_.StartSplit("PreSweepingGcVerification");
+  heap_->PreSweepingGcVerification(this);
+  timings_.EndSplit();
+
+  {
+    WriterMutexLock mu(self, *Locks::heap_bitmap_lock_);
+    // Reclaim unmarked objects.
+    Sweep(false);
+    // Swap the live and mark bitmaps for each space which we modified. This is an
+    // optimization that enables us to not clear live bits inside of the sweep. Only swaps unbound
+    // bitmaps.
+    timings_.StartSplit("SwapBitmaps");
+    SwapBitmaps();
+    timings_.EndSplit();
+    // Unbind the live and mark bitmaps.
+    UnBindBitmaps();
+  }
+  // Release the memory used by the from space.
+  if (kResetFromSpace) {
+    // Clearing from space.
+    from_space_->Clear();
+  }
+  // Protect the from space.
+  VLOG(heap)
+      << "mprotect region " << reinterpret_cast<void*>(from_space_->Begin()) << " - "
+      << reinterpret_cast<void*>(from_space_->Limit());
+  if (kProtectFromSpace) {
+    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_NONE);
+  } else {
+    mprotect(from_space_->Begin(), from_space_->Capacity(), PROT_READ);
+  }
+}
+
+void SemiSpace::ResizeMarkStack(size_t new_size) {
+  std::vector<Object*> temp(mark_stack_->Begin(), mark_stack_->End());
+  CHECK_LE(mark_stack_->Size(), new_size);
+  mark_stack_->Resize(new_size);
+  for (const auto& obj : temp) {
+    mark_stack_->PushBack(obj);
+  }
+}
+
+inline void SemiSpace::MarkStackPush(Object* obj) {
+  if (UNLIKELY(mark_stack_->Size() >= mark_stack_->Capacity())) {
+    ResizeMarkStack(mark_stack_->Capacity() * 2);
+  }
+  // The object must be pushed on to the mark stack.
+  mark_stack_->PushBack(obj);
+}
+
+// Rare case, probably not worth inlining since it will increase instruction cache miss rate.
+bool SemiSpace::MarkLargeObject(const Object* obj) {
+  // TODO: support >1 discontinuous space.
+  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+  accounting::SpaceSetMap* large_objects = large_object_space->GetMarkObjects();
+  if (UNLIKELY(!large_objects->Test(obj))) {
+    large_objects->Set(obj);
+    return true;
+  }
+  return false;
+}
+
+// Used to mark and copy objects. Any newly-marked object that is in the from-space gets moved to
+// the to-space and has its forwarding address updated. Objects which have been newly marked are
+// pushed on the mark stack.
+Object* SemiSpace::MarkObject(Object* obj) {
+  Object* ret = obj;
+  if (obj != nullptr && !IsImmune(obj)) {
+    if (from_space_->HasAddress(obj)) {
+      mirror::Object* forward_address = GetForwardingAddressInFromSpace(obj);
+      // If the object has already been moved, return the new forward address.
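+      // An unforwarded object's lock word does not decode to an address inside the
+      // to-space, so the check below distinguishes already-copied from not-yet-copied.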
+      if (!to_space_->HasAddress(forward_address)) {
+        // Otherwise, we need to move the object and add it to the markstack for processing.
+        size_t object_size = obj->SizeOf();
+        size_t dummy = 0;
+        forward_address = to_space_->Alloc(self_, object_size, &dummy);
+        // Copy over the object and add it to the mark stack since we still need to update its
+        // references.
+        memcpy(reinterpret_cast<void*>(forward_address), obj, object_size);
+        // Make sure to only update the forwarding address AFTER you copy the object so that the
+        // monitor word doesn't get stomped over.
+        COMPILE_ASSERT(sizeof(uint32_t) == sizeof(mirror::Object*),
+                       monitor_size_must_be_same_as_object);
+        obj->SetLockWord(LockWord::FromForwardingAddress(reinterpret_cast<size_t>(forward_address)));
+        MarkStackPush(forward_address);
+      }
+      ret = forward_address;
+      // TODO: Do we need this if in the else statement?
+    } else {
+      accounting::SpaceBitmap* object_bitmap = heap_->GetMarkBitmap()->GetContinuousSpaceBitmap(obj);
+      if (LIKELY(object_bitmap != nullptr)) {
+        // This object was not previously marked.
+        if (!object_bitmap->Test(obj)) {
+          object_bitmap->Set(obj);
+          MarkStackPush(obj);
+        }
+      } else {
+        DCHECK(!to_space_->HasAddress(obj)) << "Marking object in to_space_";
+        if (MarkLargeObject(obj)) {
+          MarkStackPush(obj);
+        }
+      }
+    }
+  }
+  return ret;
+}
+
+Object* SemiSpace::MarkRootCallback(Object* root, void* arg) {
+  DCHECK(root != nullptr);
+  DCHECK(arg != nullptr);
+  return reinterpret_cast<SemiSpace*>(arg)->MarkObject(root);
+}
+
+// Marks all objects in the root set.
+void SemiSpace::MarkRoots() {
+  timings_.StartSplit("MarkRoots");
+  // TODO: Visit image roots as well?
+  Runtime::Current()->VisitRoots(MarkRootCallback, this, false, true);
+  timings_.EndSplit();
+}
+
+void SemiSpace::BindLiveToMarkBitmap(space::ContinuousSpace* space) {
+  CHECK(space->IsDlMallocSpace());
+  space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
+  accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+  accounting::SpaceBitmap* mark_bitmap = alloc_space->BindLiveToMarkBitmap();
+  GetHeap()->GetMarkBitmap()->ReplaceBitmap(mark_bitmap, live_bitmap);
+}
+
+mirror::Object* SemiSpace::GetForwardingAddress(mirror::Object* obj) {
+  if (from_space_->HasAddress(obj)) {
+    LOG(FATAL) << "Shouldn't happen!";
+    return GetForwardingAddressInFromSpace(obj);
+  }
+  return obj;
+}
+
+mirror::Object* SemiSpace::SystemWeakIsMarkedCallback(Object* object, void* arg) {
+  return reinterpret_cast<SemiSpace*>(arg)->GetMarkedForwardAddress(object);
+}
+
+void SemiSpace::SweepSystemWeaks() {
+  timings_.StartSplit("SweepSystemWeaks");
+  Runtime::Current()->SweepSystemWeaks(SystemWeakIsMarkedCallback, this);
+  timings_.EndSplit();
+}
+
+struct SweepCallbackContext {
+  SemiSpace* semi_space;
+  space::AllocSpace* space;
+  Thread* self;
+};
+
+void SemiSpace::SweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
+  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
+  SemiSpace* gc = context->semi_space;
+  Heap* heap = gc->GetHeap();
+  space::AllocSpace* space = context->space;
+  Thread* self = context->self;
+  Locks::heap_bitmap_lock_->AssertExclusiveHeld(self);
+  size_t freed_bytes = space->FreeList(self, num_ptrs, ptrs);
+  heap->RecordFree(num_ptrs, freed_bytes);
+  gc->freed_objects_.fetch_add(num_ptrs);
+  gc->freed_bytes_.fetch_add(freed_bytes);
+}
+
+void SemiSpace::ZygoteSweepCallback(size_t num_ptrs, Object** ptrs, void* arg) {
+  SweepCallbackContext* context = static_cast<SweepCallbackContext*>(arg);
+  Locks::heap_bitmap_lock_->AssertExclusiveHeld(context->self);
+  Heap* heap = context->semi_space->GetHeap();
+  // We don't free any actual memory to avoid dirtying the shared zygote pages.
+  for (size_t i = 0; i < num_ptrs; ++i) {
+    Object* obj = static_cast<Object*>(ptrs[i]);
+    heap->GetLiveBitmap()->Clear(obj);
+    heap->GetCardTable()->MarkCard(obj);
+  }
+}
+
+void SemiSpace::Sweep(bool swap_bitmaps) {
+  DCHECK(mark_stack_->IsEmpty());
+  base::TimingLogger::ScopedSplit("Sweep", &timings_);
+
+  const bool partial = (GetGcType() == kGcTypePartial);
+  SweepCallbackContext scc;
+  scc.semi_space = this;
+  scc.self = Thread::Current();
+  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+    if (!space->IsDlMallocSpace()) {
+      continue;
+    }
+    // We always sweep always-collect spaces.
+    bool sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyAlwaysCollect);
+    if (!partial && !sweep_space) {
+      // We also sweep full-collect spaces when the GC isn't partial (i.e. it is a full GC).
+      sweep_space = (space->GetGcRetentionPolicy() == space::kGcRetentionPolicyFullCollect);
+    }
+    if (sweep_space) {
+      uintptr_t begin = reinterpret_cast<uintptr_t>(space->Begin());
+      uintptr_t end = reinterpret_cast<uintptr_t>(space->End());
+      scc.space = space->AsDlMallocSpace();
+      accounting::SpaceBitmap* live_bitmap = space->GetLiveBitmap();
+      accounting::SpaceBitmap* mark_bitmap = space->GetMarkBitmap();
+      if (swap_bitmaps) {
+        std::swap(live_bitmap, mark_bitmap);
+      }
+      if (!space->IsZygoteSpace()) {
+        base::TimingLogger::ScopedSplit split("SweepAllocSpace", &timings_);
+        // Bitmaps are pre-swapped for optimization which enables sweeping with the heap unlocked.
+        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
+                                           &SweepCallback, reinterpret_cast<void*>(&scc));
+      } else {
+        base::TimingLogger::ScopedSplit split("SweepZygote", &timings_);
+        // Zygote sweep takes care of dirtying cards and clearing live bits, does not free actual
+        // memory.
+        accounting::SpaceBitmap::SweepWalk(*live_bitmap, *mark_bitmap, begin, end,
+                                           &ZygoteSweepCallback, reinterpret_cast<void*>(&scc));
+      }
+    }
+  }
+
+  SweepLargeObjects(swap_bitmaps);
+}
+
+void SemiSpace::SweepLargeObjects(bool swap_bitmaps) {
+  base::TimingLogger::ScopedSplit split("SweepLargeObjects", &timings_);
+  // Sweep large objects.
+  space::LargeObjectSpace* large_object_space = GetHeap()->GetLargeObjectsSpace();
+  accounting::SpaceSetMap* large_live_objects = large_object_space->GetLiveObjects();
+  accounting::SpaceSetMap* large_mark_objects = large_object_space->GetMarkObjects();
+  if (swap_bitmaps) {
+    std::swap(large_live_objects, large_mark_objects);
+  }
+  // O(n*log(n)) but hopefully there are not too many large objects.
+  size_t freed_objects = 0;
+  size_t freed_bytes = 0;
+  Thread* self = Thread::Current();
+  for (const Object* obj : large_live_objects->GetObjects()) {
+    if (!large_mark_objects->Test(obj)) {
+      freed_bytes += large_object_space->Free(self, const_cast<Object*>(obj));
+      ++freed_objects;
+    }
+  }
+  freed_large_objects_.fetch_add(freed_objects);
+  freed_large_object_bytes_.fetch_add(freed_bytes);
+  GetHeap()->RecordFree(freed_objects, freed_bytes);
+}
+
+// Process the "referent" field in a java.lang.ref.Reference.  If the referent has not yet been
+// marked, put it on the appropriate list in the heap for later processing.
+void SemiSpace::DelayReferenceReferent(mirror::Class* klass, Object* obj) {
+  DCHECK(klass != nullptr);
+  DCHECK(klass->IsReferenceClass());
+  DCHECK(obj != nullptr);
+  Object* referent = heap_->GetReferenceReferent(obj);
+  if (referent != nullptr) {
+    Object* forward_address = GetMarkedForwardAddress(referent);
+    if (forward_address == nullptr) {
+      Thread* self = Thread::Current();
+      // TODO: Remove these locks, and use atomic stacks for storing references?
+      // We need to check that the references haven't already been enqueued since we can end up
+      // scanning the same reference multiple times due to dirty cards.
+      if (klass->IsSoftReferenceClass()) {
+        MutexLock mu(self, *heap_->GetSoftRefQueueLock());
+        if (!heap_->IsEnqueued(obj)) {
+          heap_->EnqueuePendingReference(obj, &soft_reference_list_);
+        }
+      } else if (klass->IsWeakReferenceClass()) {
+        MutexLock mu(self, *heap_->GetWeakRefQueueLock());
+        if (!heap_->IsEnqueued(obj)) {
+          heap_->EnqueuePendingReference(obj, &weak_reference_list_);
+        }
+      } else if (klass->IsFinalizerReferenceClass()) {
+        MutexLock mu(self, *heap_->GetFinalizerRefQueueLock());
+        if (!heap_->IsEnqueued(obj)) {
+          heap_->EnqueuePendingReference(obj, &finalizer_reference_list_);
+        }
+      } else if (klass->IsPhantomReferenceClass()) {
+        MutexLock mu(self, *heap_->GetPhantomRefQueueLock());
+        if (!heap_->IsEnqueued(obj)) {
+          heap_->EnqueuePendingReference(obj, &phantom_reference_list_);
+        }
+      } else {
+        LOG(FATAL) << "Invalid reference type " << PrettyClass(klass) << " " << std::hex
+                   << klass->GetAccessFlags();
+      }
+    } else if (referent != forward_address) {
+      heap_->SetReferenceReferent(obj, forward_address);
+    }
+  }
+}
+
+// Visit all of the references of an object and update.
+void SemiSpace::ScanObject(Object* obj) {
+  DCHECK(obj != nullptr);
+  DCHECK(!from_space_->HasAddress(obj)) << "Scanning object " << obj << " in from space";
+  MarkSweep::VisitObjectReferences(obj, [this](Object* obj, Object* ref, const MemberOffset& offset,
+     bool /* is_static */) ALWAYS_INLINE NO_THREAD_SAFETY_ANALYSIS {
+    mirror::Object* new_address = MarkObject(ref);
+    if (new_address != ref) {
+      DCHECK(new_address != nullptr);
+      obj->SetFieldObject(offset, new_address, false);
+    }
+  }, kMovingClasses);
+  mirror::Class* klass = obj->GetClass();
+  if (UNLIKELY(klass->IsReferenceClass())) {
+    DelayReferenceReferent(klass, obj);
+  }
+}
+
+// Scan anything that's on the mark stack.
+void SemiSpace::ProcessMarkStack(bool paused) {
+  timings_.StartSplit(paused ? "(paused)ProcessMarkStack" : "ProcessMarkStack");
+  while (!mark_stack_->IsEmpty()) {
+    ScanObject(mark_stack_->PopBack());
+  }
+  timings_.EndSplit();
+}
+
+// Walks the reference list marking any references subject to the
+// reference clearing policy.  References with a black referent are
+// removed from the list.  References with white referents biased
+// toward saving are blackened and also removed from the list.
+void SemiSpace::PreserveSomeSoftReferences(Object** list) {
+  DCHECK(list != nullptr);
+  Object* clear = nullptr;
+  size_t counter = 0;
+  DCHECK(mark_stack_->IsEmpty());
+  timings_.StartSplit("PreserveSomeSoftReferences");
+  while (*list != nullptr) {
+    Object* ref = heap_->DequeuePendingReference(list);
+    Object* referent = heap_->GetReferenceReferent(ref);
+    if (referent == nullptr) {
+      // Referent was cleared by the user during marking.
+      continue;
+    }
+    Object* forward_address = GetMarkedForwardAddress(referent);
+    bool is_marked = forward_address != nullptr;
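+    // ((++counter) & 1) preserves roughly every other white soft referent,
+    // trading retained memory against soft reference survival.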
+    if (!is_marked && ((++counter) & 1)) {
+      // Referent is white and biased toward saving, mark it.
+      forward_address = MarkObject(referent);
+      if (referent != forward_address) {
+        // Update the referent if we moved it.
+        heap_->SetReferenceReferent(ref, forward_address);
+      }
+    } else {
+      if (!is_marked) {
+        // Referent is white, queue it for clearing.
+        heap_->EnqueuePendingReference(ref, &clear);
+      } else if (referent != forward_address) {
+        CHECK(forward_address != nullptr);
+        heap_->SetReferenceReferent(ref, forward_address);
+      }
+    }
+  }
+  *list = clear;
+  timings_.EndSplit();
+  // Restart the mark with the newly black references added to the root set.
+  ProcessMarkStack(true);
+}
+
+inline Object* SemiSpace::GetMarkedForwardAddress(mirror::Object* obj) const
+    SHARED_LOCKS_REQUIRED(Locks::heap_bitmap_lock_) {
+  // All immune objects are assumed marked.
+  if (IsImmune(obj)) {
+    return obj;
+  }
+  if (from_space_->HasAddress(obj)) {
+    mirror::Object* forwarding_address = GetForwardingAddressInFromSpace(obj);
+    // If the object is forwarded then it MUST be marked.
+    if (to_space_->HasAddress(forwarding_address)) {
+      return forwarding_address;
+    }
+    // Must not be marked, return nullptr.
+    return nullptr;
+  } else if (to_space_->HasAddress(obj)) {
+    // Already forwarded, must be marked.
+    return obj;
+  }
+  return heap_->GetMarkBitmap()->Test(obj) ? obj : nullptr;
+}
+
+// Unlink the reference list clearing references objects with white
+// referents.  Cleared references registered to a reference queue are
+// scheduled for appending by the heap worker thread.
+void SemiSpace::ClearWhiteReferences(Object** list) {
+  DCHECK(list != nullptr);
+  while (*list != nullptr) {
+    Object* ref = heap_->DequeuePendingReference(list);
+    Object* referent = heap_->GetReferenceReferent(ref);
+    if (referent != nullptr) {
+      Object* forward_address = GetMarkedForwardAddress(referent);
+      if (forward_address == nullptr) {
+        // Referent is white, clear it.
+        heap_->ClearReferenceReferent(ref);
+        if (heap_->IsEnqueuable(ref)) {
+          heap_->EnqueueReference(ref, &cleared_reference_list_);
+        }
+      } else if (referent != forward_address) {
+        heap_->SetReferenceReferent(ref, forward_address);
+      }
+    }
+  }
+  DCHECK(*list == nullptr);
+}
+
+// Enqueues finalizer references with white referents.  White
+// referents are blackened, moved to the zombie field, and the
+// referent field is cleared.
+void SemiSpace::EnqueueFinalizerReferences(Object** list) {
+  DCHECK(list != nullptr);
+  timings_.StartSplit("EnqueueFinalizerReferences");
+  MemberOffset zombie_offset = heap_->GetFinalizerReferenceZombieOffset();
+  bool has_enqueued = false;
+  while (*list != nullptr) {
+    Object* ref = heap_->DequeuePendingReference(list);
+    Object* referent = heap_->GetReferenceReferent(ref);
+    if (referent != nullptr) {
+      Object* forward_address = GetMarkedForwardAddress(referent);
+      // Not marked.
+      if (forward_address == nullptr) {
+        forward_address = MarkObject(referent);
+        // If the referent is non-null the reference must be enqueuable.
+        DCHECK(heap_->IsEnqueuable(ref));
+        // Move the referent to the zombie field.
+        ref->SetFieldObject(zombie_offset, forward_address, false);
+        heap_->ClearReferenceReferent(ref);
+        heap_->EnqueueReference(ref, &cleared_reference_list_);
+        has_enqueued = true;
+      } else if (referent != forward_address) {
+        heap_->SetReferenceReferent(ref, forward_address);
+      }
+    }
+  }
+  timings_.EndSplit();
+  if (has_enqueued) {
+    ProcessMarkStack(true);
+  }
+  DCHECK(*list == nullptr);
+}
+
+// Process reference class instances and schedule finalizations.
+void SemiSpace::ProcessReferences(Object** soft_references, bool clear_soft,
+                                  Object** weak_references,
+                                  Object** finalizer_references,
+                                  Object** phantom_references) {
+  CHECK(soft_references != nullptr);
+  CHECK(weak_references != nullptr);
+  CHECK(finalizer_references != nullptr);
+  CHECK(phantom_references != nullptr);
+  CHECK(mark_stack_->IsEmpty());
+
+  // Unless we are in the zygote or required to clear soft references
+  // with white referents, preserve some white referents.
+  if (!clear_soft && !Runtime::Current()->IsZygote()) {
+    PreserveSomeSoftReferences(soft_references);
+  }
+
+  timings_.StartSplit("ProcessReferences");
+  // Clear all remaining soft and weak references with white
+  // referents.
+  ClearWhiteReferences(soft_references);
+  ClearWhiteReferences(weak_references);
+  timings_.EndSplit();
+
+  // Preserve all white objects with finalize methods and schedule
+  // them for finalization.
+  EnqueueFinalizerReferences(finalizer_references);
+
+  timings_.StartSplit("ProcessReferences");
+  // Clear all f-reachable soft and weak references with white
+  // referents.
+  ClearWhiteReferences(soft_references);
+  ClearWhiteReferences(weak_references);
+
+  // Clear all phantom references with white referents.
+  ClearWhiteReferences(phantom_references);
+
+  // At this point all reference lists should be empty.
+  DCHECK(*soft_references == nullptr);
+  DCHECK(*weak_references == nullptr);
+  DCHECK(*finalizer_references == nullptr);
+  DCHECK(*phantom_references == nullptr);
+  timings_.EndSplit();
+}
+
+void SemiSpace::UnBindBitmaps() {
+  base::TimingLogger::ScopedSplit split("UnBindBitmaps", &timings_);
+  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+    if (space->IsDlMallocSpace()) {
+      space::DlMallocSpace* alloc_space = space->AsDlMallocSpace();
+      if (alloc_space->HasBoundBitmaps()) {
+        alloc_space->UnBindBitmaps();
+        heap_->GetMarkBitmap()->ReplaceBitmap(alloc_space->GetLiveBitmap(),
+                                              alloc_space->GetMarkBitmap());
+      }
+    }
+  }
+}
+
+void SemiSpace::SetToSpace(space::ContinuousMemMapAllocSpace* to_space) {
+  DCHECK(to_space != nullptr);
+  to_space_ = to_space;
+}
+
+void SemiSpace::SetFromSpace(space::ContinuousMemMapAllocSpace* from_space) {
+  DCHECK(from_space != nullptr);
+  from_space_ = from_space;
+}
+
+void SemiSpace::FinishPhase() {
+  base::TimingLogger::ScopedSplit split("FinishPhase", &timings_);
+  // Can't enqueue references if we hold the mutator lock.
+  Object* cleared_references = GetClearedReferences();
+  Heap* heap = GetHeap();
+  timings_.NewSplit("EnqueueClearedReferences");
+  heap->EnqueueClearedReferences(&cleared_references);
+
+  timings_.NewSplit("PostGcVerification");
+  heap->PostGcVerification(this);
+
+  // Null the "to" and "from" spaces since compacting from one to the other isn't valid until
+  // further action is done by the heap.
+  to_space_ = nullptr;
+  from_space_ = nullptr;
+
+  // Update the cumulative statistics
+  total_time_ns_ += GetDurationNs();
+  total_paused_time_ns_ += std::accumulate(GetPauseTimes().begin(), GetPauseTimes().end(),
+                                           static_cast<uint64_t>(0), std::plus<uint64_t>());
+  total_freed_objects_ += GetFreedObjects() + GetFreedLargeObjects();
+  total_freed_bytes_ += GetFreedBytes() + GetFreedLargeObjectBytes();
+
+  // Ensure that the mark stack is empty.
+  CHECK(mark_stack_->IsEmpty());
+
+  // Update the cumulative loggers.
+  cumulative_timings_.Start();
+  cumulative_timings_.AddLogger(timings_);
+  cumulative_timings_.End();
+
+  // Clear all of the spaces' mark bitmaps.
+  for (const auto& space : GetHeap()->GetContinuousSpaces()) {
+    accounting::SpaceBitmap* bitmap = space->GetMarkBitmap();
+    if (bitmap != nullptr &&
+        space->GetGcRetentionPolicy() != space::kGcRetentionPolicyNeverCollect) {
+      bitmap->Clear();
+    }
+  }
+  mark_stack_->Reset();
+
+  // Reset the marked large objects.
+  space::LargeObjectSpace* large_objects = GetHeap()->GetLargeObjectsSpace();
+  large_objects->GetMarkObjects()->Clear();
+}
+
+}  // namespace collector
+}  // namespace gc
+}  // namespace art